the-stack_0_11707 | #!/usr/bin/env python3
from typing import List
from reagent import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.models.base import ModelBase
from reagent.models.dqn import FullyConnectedDQN
from reagent.net_builder.discrete_dqn_net_builder import DiscreteDQNNetBuilder
from reagent.parameters import NormalizationData, param_hash
@dataclass
class FullyConnected(DiscreteDQNNetBuilder):
__hash__ = param_hash
sizes: List[int] = field(default_factory=lambda: [256, 128])
activations: List[str] = field(default_factory=lambda: ["relu", "relu"])
dropout_ratio: float = 0.0
use_batch_norm: bool = False
def __post_init_post_parse__(self):
super().__init__()
assert len(self.sizes) == len(self.activations), (
f"Must have the same numbers of sizes and activations; got: "
f"{self.sizes}, {self.activations}"
)
def build_q_network(
self,
state_feature_config: rlt.ModelFeatureConfig,
state_normalization_data: NormalizationData,
output_dim: int,
) -> ModelBase:
state_dim = self._get_input_dim(state_normalization_data)
return FullyConnectedDQN(
state_dim=state_dim,
action_dim=output_dim,
sizes=self.sizes,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
use_batch_norm=self.use_batch_norm,
)
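# build_q_network() above maps the normalized state dimension to `output_dim` Q-values
# through the configured sizes/activations; dropout_ratio and use_batch_norm are passed
# straight through to FullyConnectedDQN.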
the-stack_0_11708 | import pandas as pd
import qcportal as ptl
from simtk import unit
PARTICLE = unit.mole.create_unit(
6.02214076e23 ** -1,
"particle",
"particle",
)
HARTREE_PER_PARTICLE = unit.hartree / PARTICLE
HARTREE_TO_KCALMOL = HARTREE_PER_PARTICLE.conversion_factor_to(
unit.kilocalorie_per_mole
)
def main():
# Define the qcfractal server instance to download data from the datasets:
# 1. OpenFF Theory Benchmarking Set v1.0 - which contains the torsiondrives at different levels of theory
# link (https://github.com/openforcefield/qca-dataset-submission/tree/master/submissions/2020-12-18-OpenFF-Theory
# -Benchmarking-Set-v1.0)
client = ptl.FractalClient()
ds = client.get_collection(
"TorsionDriveDataset", "OpenFF Theory Benchmarking Set v1.0"
)
specifications = ds.list_specifications().index.to_list()
print(specifications)
# Create a dataframe to store the torsiondrives data
df = pd.DataFrame(columns=specifications)
for i, entry_index in enumerate(ds.df.index):
for spec_name in specifications:
data_entry = ds.get_entry(entry_index)
td_record_id = data_entry.object_map[spec_name]
td_dict = {}
td_dict["td_record_id"] = td_record_id
td_dict["attributes"] = data_entry.attributes
td_dict["mapped_smiles"] = data_entry.attributes[
"canonical_isomeric_explicit_hydrogen_mapped_smiles"
]
df.loc[entry_index + str(i), spec_name] = [td_dict]
td_record = client.query_procedures(td_record_id)[0]
print(f"{i:5d} : {entry_index:50s} status {td_record.status}")
if td_record.status == "COMPLETE":
angles = []
energies = []
dipoles = []
quadrupoles = []
for key, value in td_record.get_final_energies().items():
angles.append(key[0])
energies.append(value)
dipoles.append(
td_record.get_final_results()[key].extras["qcvars"][
"SCF DIPOLE"
]
)
quadrupoles.append(
td_record.get_final_results()[key].extras["qcvars"][
"SCF QUADRUPOLE"
]
)
angles, energies, dipoles, quadrupoles = zip(
*sorted(zip(angles, energies, dipoles, quadrupoles))
)
energy_min = min(energies)
relative_energies = [(x - energy_min) for x in energies]
dihedrals = td_record.keywords.dict()["dihedrals"][0]
df.loc[entry_index + str(i), spec_name][0].update(
{
"initial_molecules": client.query_molecules(
td_record.initial_molecule
),
"final_molecules": td_record.get_final_molecules(),
"final_energies": td_record.get_final_energies(),
"angles": angles,
"relative_energies": relative_energies,
"dipoles": dipoles,
"quadrupoles": quadrupoles,
"dihedrals": dihedrals,
"keywords": td_record.keywords.dict(),
}
)
# saving it to a pickle file
df.to_pickle("./torsiondrive_data.pkl")
if __name__ == "__main__":
main()
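# Illustrative sketch (the helper name below is only for exposition): the pickle written
# by main() stores one-element lists of dicts per (entry, spec) cell, so it can be read
# back with pandas and the relative energies converted from hartree to kcal/mol using the
# HARTREE_TO_KCALMOL factor defined at the top of this script.
def load_relative_energies_kcalmol(path="./torsiondrive_data.pkl"):
    """Return {(entry, spec): relative energies in kcal/mol} for completed torsiondrives."""
    data = pd.read_pickle(path)
    out = {}
    for entry in data.index:
        for spec in data.columns:
            cell = data.loc[entry, spec]
            # only cells whose torsiondrive record was COMPLETE carry "relative_energies"
            if isinstance(cell, list) and cell and "relative_energies" in cell[0]:
                out[(entry, spec)] = [e * HARTREE_TO_KCALMOL for e in cell[0]["relative_energies"]]
    return out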
the-stack_0_11709 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @File: cms.py
"""
envlib.cms
~~~~~~~~~~
Preset library of CMS configuration classes
"""
import json as json_tool
from copy import deepcopy
from envlib.env.envlogging import logger
from envlib.env.globals import current_app as app
from envlib.env.globals import g
from envlib.env.helpers import GetKeysMixin
from envlib.env_resources.preset_data import CMS_STORAGE_DIRECTORY, CMS_STORAGE_TYPE, cms_system_config_data, \
cms_archive_config_data
from envlib.envsetup.storage import Storage
from envlib.util import get_last_ip_str
from resources.data import STORAGE_CONFIG
__all__ = ['Cms', ]
class Cms(GetKeysMixin):
"""Cms配置类"""
def __init__(self):
pass
@classmethod
def query_cms_platform_config_by_rest(cls, check=False):
"""查询cms,系统配置,平台配置
查询结果绑定到 当前运行的Env实例关联的上下文环境信息AppCtxGlobals实例的代理 ``g`` 下::
key='cms_platform_config', value=查询接口返回值,cms,系统配置,平台配置
Args:
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值,cms,系统配置,平台配置
"""
res = app.send_by_rest('/api/demo@get')
app.bind_to_g(key='cms_platform_config', value=json_tool.loads(res.get('value')), lock=False)
return json_tool.loads(res.get('value'))
@classmethod
def config_cms_platform_by_rest(cls, json=cms_system_config_data, check=False):
"""cms,系统配置,平台配置
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
_config_cms_platform_json = {
"key": "viid",
"value": json_tool.dumps(json)
}
res = app.send_by_rest('/api/demo@post', json=_config_cms_platform_json, check=check)
cls.query_cms_platform_config_by_rest()
return res
@classmethod
def query_cms_archive_config_by_rest(cls, check=False):
"""查询cms,系统配置,一人一档配置
查询结果绑定到 当前运行的Env实例关联的上下文环境信息AppCtxGlobals实例的代理 ``g`` 下::
key='cms_archive_config', value=查询接口返回值,cms,系统配置,一人一档配置
Args:
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值,cms,系统配置,一人一档配置
"""
res = app.send_by_rest('/api/demo@get', check=check)
app.bind_to_g(key='cms_archive_config', value=res, lock=False)
return res
@classmethod
def config_cms_archive_by_rest(cls, json=cms_archive_config_data, check=False):
"""cms,系统配置,一人一档
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@put', json=json, check=check)
cls.query_cms_archive_config_by_rest()
return res
@classmethod
def query_cms_cloud_storage_list_by_rest(cls, check=False):
"""cms-查询存储集群列表
查询结果绑定到 当前运行的Env实例关联的上下文环境信息AppCtxGlobals实例的代理 ``g`` 下::
key='cms_cloud_storage_list', value=查询接口返回值,cms-查询存储集群列表
Args:
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值,cms-查询存储集群列表
"""
res = app.send_by_rest('/api/demo@get', check=check)
app.bind_to_g(key='cms_cloud_storage_list', value=res.get('data'), lock=False)
return res.get('data')
@classmethod
def add_cms_cloud_storage_by_rest_via_json(cls, json, check=False):
"""cms系统配置-云存储配置-添加存储集群
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@post', json=json, check=check)
return res
@classmethod
def query_cms_cloud_storage_capacity_by_rest(cls, ip=STORAGE_CONFIG.get('cm_ip'), check=False):
"""
Args:
ip (str): ip
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@get', params=f'ip={ip}&port=9001&protocal=0',
check=check)
return res.get('capacity')
@classmethod
def config_cms_cloud_storage_directory_by_rest_via_json(cls, json, check=False):
"""cms-存储集群存储目录配置
Args:
json (any): json数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@post', json=json, check=check)
return res
@classmethod
def query_cms_cloud_storage_directory_by_rest_via_params(cls, params, check=False):
"""cms-查询存储集群存储目录配置
Args:
params (any): params数据结构
check (bool): 接口返回状态码校验,默认不校验
Returns:
rest接口返回值
"""
res = app.send_by_rest('/api/demo@get', params=params, check=check)
return res
@classmethod
def config_cms_cloud_storage_from_env_ini(cls):
"""cms,系统配置-云存储配置,根据env_ini中预设的存储集群,添加存储集群
Returns:
rest接口返回值
"""
_storage_list = cls.query_cms_cloud_storage_list_by_rest().get('data')
_exist_storage = [_storage for _storage in _storage_list if
_storage.get('storage_name') == STORAGE_CONFIG.get('cms_storage_name')]
if _exist_storage:
logger.warning(f"CMS已添加存储{STORAGE_CONFIG.get('cms_storage_name')},请勿重复添加!!")
else:
_storage_capacity = cls.query_cms_cloud_storage_capacity_by_rest(
ip=STORAGE_CONFIG.get('cm_ip'))
_set_storage_capacity = _storage_capacity if _storage_capacity else 30000
_cms_storage_add_json = {
"read_write_permission": 1,
"storage_id": get_last_ip_str(STORAGE_CONFIG.get('cm_ip')),
"storage_name": STORAGE_CONFIG.get('cms_storage_name'),
"storage_ip": STORAGE_CONFIG.get('cm_ip'),
"storage_port": 9001,
"storage_protocal": 0,
"storage_capacity": _set_storage_capacity,
"storage_desc": None
}
cls.add_cms_cloud_storage_by_rest_via_json(json=_cms_storage_add_json)
res = cls.query_cms_cloud_storage_list_by_rest()
return res
@classmethod
def get_storage_id_via_cm_ip(cls, cm_ip=STORAGE_CONFIG.get('cm_ip')):
"""通过cm ip获取存储id
Args:
cm_ip (str): cm ip
Returns:
rest接口返回值, 存储id
"""
cls.query_cms_cloud_storage_list_by_rest()
_storage_id = g.getk('cms_cloud_storage_list').extracting('storage_id', filter={'storage_ip': cm_ip})
if _storage_id is None:
cls.config_cms_cloud_storage_from_env_ini()
_storage_id = g.getk('cms_cloud_storage_list').extracting('storage_id', filter={'storage_ip': cm_ip})
return _storage_id
@classmethod
def config_cms_cloud_storage_directory_from_env_ini(cls):
"""cms,系统配置-云存储配置,根据env_ini中预设的存储集群,进行目录配置
Returns:
rest接口返回值
"""
_storage_id = cls.get_storage_id_via_cm_ip(cm_ip=STORAGE_CONFIG.get('cm_ip'))
for _bucket_name in CMS_STORAGE_DIRECTORY:
_quota = Storage.query_y3000_bucket_storage_quota_via_bucket_name(bucket_name=_bucket_name)
_quota = 200 if _quota == 0 else _quota
_bucket_id = CMS_STORAGE_TYPE.get(_bucket_name)
_query_storage_set = cls.query_cms_cloud_storage_directory_by_rest_via_params(
params=f'data_type={_bucket_id}&storage_id={_storage_id}')
_json = deepcopy(CMS_STORAGE_DIRECTORY.get(_bucket_name))
_json.update(storage_id=_storage_id)
_json.get('storage_info')[0].update(capacity=_quota)
if not _query_storage_set:  # not configured yet, so configure it via the API
cls.config_cms_cloud_storage_directory_by_rest_via_json(json=_json)
if __name__ == '__main__':
pass
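# One possible call order based on the methods above (requires a live Env/app context
# behind the `app` and `g` proxies imported at the top, so it is sketched as comments):
#
#   Cms.query_cms_platform_config_by_rest()
#   Cms.config_cms_cloud_storage_from_env_ini()
#   Cms.config_cms_cloud_storage_directory_from_env_ini()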
the-stack_0_11717 | # -*- coding: utf-8 -*-
import numpy
from matplotlib import pyplot
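# Leaky integrate-and-fire update: dv combines a leak term (-0.01 * v), excitatory (ge)
# and inhibitory (gi) conductances and an external input i; neurons whose potential
# exceeds 1 spike, and dv[spk] = -v[spk] resets them to 0 on the next step.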
def lif(v, ge, gi, i):
dv = (v * -0.01) + ge - gi + i
spk = v > 1
dv[spk] = -v[spk]
return dv, spk
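# Network of two equal populations: every neuron receives noisy input i and a sinusoidal
# inhibition gi with per-neuron phase offsets, while the second half additionally gets
# excitation ge driven by the first half's spikes from the previous time step.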
def lif_net(num_neurons, duration):
offset = -numpy.linspace(0, 4 * numpy.pi, num_neurons)
offset[:num_neurons // 2] = -3 * numpy.pi
v = numpy.zeros((duration, num_neurons))
ge = numpy.zeros(num_neurons)
gi = numpy.zeros(num_neurons)
i = 0.019 * numpy.random.rand(duration, num_neurons)
spikes = numpy.zeros((duration, num_neurons))
v[0,:] = numpy.random.rand(num_neurons)
for t in numpy.arange(1, duration):
ge[num_neurons // 2:] = 0.15 * spikes[t-1, :num_neurons // 2]
gi = numpy.ones(num_neurons) * 0.001 * (numpy.sin(offset + t / 100) + 1)
dv, spikes[t,:] = lif(v[t-1,:], ge, gi, i[t,:])
v[t,:] = v[t-1,:] + dv
return spikes
spikes = lif_net(2000, 3000)
indices = numpy.where(spikes)
pyplot.figure()
ax = pyplot.subplot(121)
pyplot.scatter(indices[0][indices[1] < 1000], indices[1][indices[1] < 1000], marker='.', alpha=0.5)
indices = numpy.where(spikes)
pyplot.scatter(indices[0][indices[1] >= 1000], indices[1][indices[1] >= 1000], marker='.', alpha=0.5)
pyplot.xlabel('Time (ms)')
pyplot.yticks([])
pyplot.subplot(164)
pyplot.hist(indices[1], bins=50, orientation='horizontal')
pyplot.yticks([])
pyplot.xticks([])
pyplot.tight_layout()
the-stack_0_11718 | # -*- coding: utf-8 -*-
# file: BERT_SPC.py
# author: songyouwei <[email protected]>
# Copyright (C) 2019. All Rights Reserved.
import torch
import torch.nn as nn
class BERT_SPC(nn.Module):
def __init__(self, bert, opt):
super(BERT_SPC, self).__init__()
self.bert = bert
self.dropout = nn.Dropout(0.4)
self.dense = nn.Linear(opt.bert_dim, opt.polarities_dim)
def forward(self, inputs):
text_bert_indices, bert_segments_ids = inputs[0], inputs[1]
_, pooled_output = self.bert(text_bert_indices, token_type_ids=bert_segments_ids)
pooled_output = self.dropout(pooled_output)
logits = self.dense(pooled_output)
return logits
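# Illustrative forward pass (a sketch, assuming `bert` returns a (sequence_output,
# pooled_output) tuple as in the older pytorch-pretrained-bert / transformers<4 API,
# and that `opt` provides bert_dim and polarities_dim):
#
#   model = BERT_SPC(bert, opt)
#   logits = model([text_bert_indices, bert_segments_ids])  # [batch, polarities_dim]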
the-stack_0_11719 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Advanced Movie Selection for Dreambox-Enigma2
#
# The plugin is developed on the basis from a lot of single plugins (thx for the code @ all)
# Coded by JackDaniel @ cmikula (c)2011
# Support: www.i-have-a-dreambox.com
#
# This plugin is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported
# License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative
# Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
#
# Alternatively, this plugin may be distributed and executed on hardware which
# is licensed by Dream Multimedia GmbH.
#
# This plugin is NOT free software. It is open source, you are allowed to
# modify it (if you keep the license), but it may not be commercially
# distributed other than under the conditions noted above.
#
from __future__ import print_function
from __init__ import _
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.Pixmap import Pixmap
from Components.Button import Button
from Components.config import config, getConfigListEntry
from Components.ActionMap import ActionMap
from Components.ConfigList import ConfigListScreen
from Components.MultiContent import MultiContentEntryText
from Components.GUIComponent import GUIComponent
from Components.Sources.StaticText import StaticText
from enigma import eListboxPythonMultiContent, eListbox, gFont, RT_HALIGN_LEFT, RT_HALIGN_RIGHT
from Source.Remote.MessageServer import serverInstance, getIpAddress
from Source.Remote.Client import getClients
from time import localtime, strftime
from Source.Globals import SkinTools
staticIP = None
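# ClientSetupList renders one entry per discovered client box: hostname and power/recording
# status on the top row, IP address and port below it, and the last/next remote wastebasket
# cleanup times underneath (see buildMovieListEntry).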
class ClientSetupList(GUIComponent):
def __init__(self, ip_address):
GUIComponent.__init__(self)
self.l = eListboxPythonMultiContent()
self.l.setFont(0, gFont("Regular", 22))
self.l.setFont(1, gFont("Regular", 18))
self.l.setItemHeight(100)
self.l.setBuildFunc(self.buildMovieListEntry)
self.onSelectionChanged = []
self.staticIP = ip_address
def connectSelChanged(self, fnc):
if not fnc in self.onSelectionChanged:
self.onSelectionChanged.append(fnc)
def disconnectSelChanged(self, fnc):
if fnc in self.onSelectionChanged:
self.onSelectionChanged.remove(fnc)
def selectionChanged(self):
for x in self.onSelectionChanged:
x()
def buildMovieListEntry(self, client):
res = [None]
width = self.l.getItemSize().width()
width_up_r = 250
width_up_l = width - width_up_r
width_dn_r = width / 2
width_dn_l = width - width_dn_r
pos_up_r = width - width_up_r
pos_dn_r = width - width_dn_r
if client.isRecording():
stby_text = _("Status:") + ' ' + _("Recording")
elif client.inStandby():
stby_text = _("Status:") + ' ' + _("Standby")
else:
stby_text = _("Status:") + ' ' + _("Switched on")
last_trash_clean_status = ""
lastEvent = client.lastTrashEvent()
if lastEvent == -1:
last_trash_clean_status = (_("The %s is a client box") % client.getDeviceName())
elif lastEvent > 0:
t = localtime(lastEvent)
last_trash_clean_status = _("Last remote wastebasket empty at %s") % (strftime(("%02d.%02d.%04d" % (t[2], t[1], t[0])) + ' ' + _("at") + ' ' + ("%02d:%02d" % (t[3], t[4])) + ' ' + _("Clock")))
next_trash_clean_status = ""
nextEvent = client.nextTrashEvent()
if nextEvent == -1:
next_trash_clean_status = (_("The %s is a client box") % client.getDeviceName())
elif nextEvent > 0:
t = localtime(nextEvent)
next_trash_clean_status = _("Next remote wastebasket empty at %s") % (strftime(("%02d.%02d.%04d" % (t[2], t[1], t[0])) + ' ' + _("at") + ' ' + ("%02d:%02d" % (t[3], t[4])) + ' ' + _("Clock")))
hostname = _("Hostname:") + ' ' + client.getDeviceName()
ip_addr = client.getAddress()
addr = _("IP:") + ' ' + ip_addr
if ip_addr == self.staticIP:
addr = addr + ' ' + _("<Local device>")
port = _("Port:") + ' ' + str(client.getPort())
res.append(MultiContentEntryText(pos=(5, 2), size=(width_up_l, 30), font=0, flags=RT_HALIGN_LEFT, text=hostname))
res.append(MultiContentEntryText(pos=(pos_up_r, 3), size=(width_up_r, 22), font=1, flags=RT_HALIGN_RIGHT, text=stby_text))
res.append(MultiContentEntryText(pos=(5, 26), size=(width_dn_l, 30), font=1, flags=RT_HALIGN_LEFT, text=addr))
res.append(MultiContentEntryText(pos=(pos_dn_r, 28), size=(width_dn_r, 22), font=1, flags=RT_HALIGN_RIGHT, text=port))
res.append(MultiContentEntryText(pos=(5, 50), size=(width, 30), font=1, flags=RT_HALIGN_LEFT, text=last_trash_clean_status))
res.append(MultiContentEntryText(pos=(5, 75), size=(width, 30), font=1, flags=RT_HALIGN_LEFT, text=next_trash_clean_status))
return res
def moveToIndex(self, index):
self.instance.moveSelectionTo(index)
def getCurrentIndex(self):
return self.instance.getCurrentIndex()
def getCurrent(self):
l = self.l.getCurrentSelection()
return l and l[0]
GUI_WIDGET = eListbox
def postWidgetCreate(self, instance):
instance.setContent(self.l)
instance.selectionChanged.get().append(self.selectionChanged)
def preWidgetRemove(self, instance):
instance.setContent(None)
instance.selectionChanged.get().remove(self.selectionChanged)
def reload(self):
self.list = []
for client in getClients():
self.list.append((client,))
print(client.getAddress())
self.l.setList(self.list)
def remove(self, x):
for l in self.list[:]:
if l[0] == x:
self.list.remove(l)
self.l.setList(self.list)
def __len__(self):
return len(self.list)
def moveTo(self, client):
count = 0
for x in self.list:
if x[0] == client:
self.instance.moveSelectionTo(count)
return True
count += 1
return False
class ClientSetup(ConfigListScreen, Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.skinName = SkinTools.appendResolution("AdvancedMovieSelection_ClientSetup_")
self.staticIP = getIpAddress('eth0')
self.session = session
self["key_red"] = Button(_("Close"))
self["key_green"] = StaticText("")
self["key_yellow"] = StaticText("")
self["actions"] = ActionMap(["WizardActions", "MenuActions", "ShortcutActions", "EPGSelectActions"],
{
"ok": self.keySave,
"back": self.keyCancel,
"red": self.keyCancel,
"green": self.keySave,
"yellow": self.keyYellow,
"up": self.keyUp,
"down": self.keyDown,
"nextBouquet": self.keyBouquetUp,
"prevBouquet": self.keyBouquetDown,
}, -1)
self["status"] = StaticText("")
self["help"] = StaticText("")
self["green_button"] = Pixmap()
self["yellow_button"] = Pixmap()
self["green_button"].hide()
self["yellow_button"].hide()
self["clienttxt"] = StaticText("")
self["list"] = ClientSetupList(self.staticIP)
self.list = self["list"]
self.list.reload()
self.configList = []
ConfigListScreen.__init__(self, self.configList, session=self.session)
if not self.showHelp in self["config"].onSelectionChanged:
self["config"].onSelectionChanged.append(self.showHelp)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("Advanced Movie Selection - Clientbox setup"))
if self.staticIP:
self.createSetup()
self["key_green"].setText(_("Save"))
self["key_yellow"].setText(_("Manual search"))
self["green_button"].show()
self["yellow_button"].show()
self["status"].setText(_("Local IP: %s") % self.staticIP)
if config.AdvancedMovieSelection.server_enabled.value:
self["clienttxt"].setText(_("Available Server/Clients"))
else:
self["clienttxt"].setText(_("Remoteserver disabled!"))
else:
self["status"].setText(_("ATTENTION: DHCP in lan configuration is activ, no clientbox services available!"))
def createSetup(self):
self.configList = []
self.configList.append(getConfigListEntry(_("Port address:"), config.AdvancedMovieSelection.server_port, _("Set the port address for client and server. Port address from connected clients will be automatically updated.")))
self.configList.append(getConfigListEntry(_("Start search IP:"), config.AdvancedMovieSelection.start_search_ip, _("Only last three digits from the IP must be set.")))
self.configList.append(getConfigListEntry(_("Stop search IP:"), config.AdvancedMovieSelection.stop_search_ip, _("Only last three digits from the IP must be set.")))
self["config"].setList(self.configList)
def showHelp(self):
current = self["config"].getCurrent()
if len(current) > 2 and current[2] is not None:
self["help"].setText(current[2])
else:
self["help"].setText(_("No Helptext available!"))
def cancelConfirm(self, result):
if not result:
return
for x in self["config"].list:
x[1].cancel()
self.close()
def keyCancel(self):
if self["config"].isChanged():
self.session.openWithCallback(self.cancelConfirm, MessageBox, _("Really close without saving settings?"))
else:
self.close()
def keySave(self):
if config.AdvancedMovieSelection.server_port.isChanged():
self.setPort()
if self.staticIP:
ConfigListScreen.keySave(self)
def keyYellow(self):
if self.staticIP:
if config.AdvancedMovieSelection.server_port.isChanged():
self.setPort()
self["status"].setText(_("Searching for clients, please wait ...")) #TODO: status wird nicht angezeigt ;(
serverInstance.setSearchRange(config.AdvancedMovieSelection.start_search_ip.value, config.AdvancedMovieSelection.stop_search_ip.value)
serverInstance.findClients()
self.finishedState()
def finishedState(self):
self["status"].setText(_("Manual search finished"))
self.list.reload()
def setPort(self):
config.AdvancedMovieSelection.server_port.save()
port = config.AdvancedMovieSelection.server_port.value
for client in getClients():
if client.getAddress() != self.staticIP:
client.setPort(port)
else:
# this only sets the port of the local client; don't reconnect it!
client.port = port
serverInstance.reconnect(port=port)
def keyUp(self):
self["config"].instance.moveSelection(self["config"].instance.moveUp)
def keyDown(self):
self["config"].instance.moveSelection(self["config"].instance.moveDown)
def keyBouquetUp(self):
self["list"].instance.moveSelection(self["list"].instance.pageUp)
def keyBouquetDown(self):
self["list"].instance.moveSelection(self["list"].instance.pageDown)
the-stack_0_11720 | import glob, os, shutil
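# Keep only columns 0, 1, 2, 19 and 20 of each label file, replace ./labels with the
# converted copies, and rewrite train.txt/test.txt so the image paths point at the
# current working directory.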
if not os.path.exists('./converted'):
os.makedirs('./converted')
os.chdir('./labels')
for file in glob.glob("*.txt"):
f = open(file, "r")
line = f.read()
f.close()
lineVals = line.split()
if (len(lineVals) > 19):
newLine = lineVals[0] + ' ' + lineVals[1] + ' ' + lineVals[2] + ' ' + lineVals[19] + ' ' + lineVals[20]
else:
newLine = ' '
with open('../converted/' + file, 'w') as out_file:
out_file.write(newLine)
os.chdir('../')
# delete all files in labels
shutil.rmtree('./labels')
# move converted to labels
os.rename('./converted', './labels')
# fix train and test
cwd = os.getcwd()
with open('./train_new.txt', 'w') as file:
f = open('./train.txt', "r")
for x in f:
file.write(x.replace('sspdFormat', cwd).replace('g ', 'g'))
f.close()
with open('./test_new.txt', 'w') as file:
f = open('./test.txt', "r")
for x in f:
file.write(x.replace('sspdFormat', cwd).replace('g ', 'g'))
f.close()
os.remove('./train.txt')
os.remove('./test.txt')
os.rename('./train_new.txt', './train.txt')
os.rename('./test_new.txt', './test.txt')
the-stack_0_11723 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_dns
version_added: '2.8'
short_description: Configure FlashArray DNS settings
description:
- Set or erase configuration for the DNS settings.
- Nameservers provided will overwrite any existing nameservers.
author:
- Pure Storage Ansible Team (@sdodsley) <[email protected]>
options:
state:
description:
- Set or delete DNS configuration
default: present
type: str
choices: [ absent, present ]
domain:
description:
- Domain suffix to be appended when performing DNS lookups.
type: str
nameservers:
description:
- List of up to 3 unique DNS server IP addresses. These can be
IPv4 or IPv6. No validation of the addresses is performed.
type: list
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Delete existing DNS settings
purefa_dns:
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Set DNS settings
purefa_dns:
domain: purestorage.com
nameservers:
- 8.8.8.8
- 8.8.4.4
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def remove(duplicate):
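    """Return a new list with duplicates removed, preserving first-seen order."""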
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
def delete_dns(module, array):
"""Delete DNS settings"""
changed = False
current_dns = array.get_dns()
if current_dns['domain'] == '' and current_dns['nameservers'] == ['']:
module.exit_json(changed=changed)
else:
try:
array.set_dns(domain='', nameservers=[])
changed = True
except Exception:
module.fail_json(msg='Delete DNS settings failed')
module.exit_json(changed=changed)
def create_dns(module, array):
"""Set DNS settings"""
changed = False
current_dns = array.get_dns()
if current_dns['domain'] != module.params['domain'] or sorted(module.params['nameservers']) != sorted(current_dns['nameservers']):
try:
array.set_dns(domain=module.params['domain'],
nameservers=module.params['nameservers'][0:3])
changed = True
except Exception:
module.fail_json(msg='Set DNS settings failed: Check configuration')
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
domain=dict(type='str'),
nameservers=dict(type='list'),
))
required_if = [('state', 'present', ['domain', 'nameservers'])]
module = AnsibleModule(argument_spec,
required_if=required_if,
supports_check_mode=False)
state = module.params['state']
array = get_system(module)
if state == 'absent':
delete_dns(module, array)
elif state == 'present':
module.params['nameservers'] = remove(module.params['nameservers'])
create_dns(module, array)
else:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
the-stack_0_11724 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core and Devcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
COINBASE_MATURITY,
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.script_util import (
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import DevcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, inputs=[getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, sighash=override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, hashtype=SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
"""Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
while callable(expr):
expr = expr(ctx)
if isinstance(expr, list):
expr = [deep_eval(ctx, x) for x in expr]
return expr
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
Final = namedtuple("Final", "value")
def get(ctx, name):
"""Evaluate name in context ctx."""
assert name in ctx, "Missing '%s' in context" % name
expr = ctx[name]
if not isinstance(expr, Final):
# Evaluate and cache the result.
expr = Final(deep_eval(ctx, expr))
ctx[name] = expr
return expr.value
def getter(name):
"""Return a callable that evaluates name in its passed context."""
return lambda ctx: get(ctx, name)
def override(expr, **kwargs):
"""Return a callable that evaluates expr in a modified context."""
return lambda ctx: deep_eval({**ctx, **kwargs}, expr)
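# A minimal illustration of the three helpers above (kept as comments; the toy names are
# only for exposition). get() evaluates an entry and caches the result, getter() defers
# evaluation, and override() evaluates the same expression in a selectively modified
# context; fresh dict copies are used below to avoid get()'s Final-value caching.
#
#   _demo = {"a": 1, "b": lambda ctx: get(ctx, "a") + 1}
#   get(dict(_demo), "b")                     # == 2
#   override(getter("b"), a=10)(dict(_demo))  # == 11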
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
"""Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
mode = get(ctx, "mode")
if mode == "taproot":
return SIGHASH_DEFAULT
else:
return SIGHASH_ALL
def default_tapleaf(ctx):
"""Default expression for "tapleaf": looking up leaf in tap[2]."""
return get(ctx, "tap").leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
"""Default expression for "script_taproot": tapleaf.script."""
return get(ctx, "tapleaf").script
def default_leafversion(ctx):
"""Default expression for "leafversion": tapleaf.version"""
return get(ctx, "tapleaf").version
def default_negflag(ctx):
"""Default expression for "negflag": tap.negflag."""
return get(ctx, "tap").negflag
def default_pubkey_internal(ctx):
"""Default expression for "pubkey_internal": tap.internal_pubkey."""
return get(ctx, "tap").internal_pubkey
def default_merklebranch(ctx):
"""Default expression for "merklebranch": tapleaf.merklebranch."""
return get(ctx, "tapleaf").merklebranch
def default_controlblock(ctx):
"""Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch."""
return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")
def default_sighash(ctx):
"""Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash."""
tx = get(ctx, "tx")
idx = get(ctx, "idx")
hashtype = get(ctx, "hashtype_actual")
mode = get(ctx, "mode")
if mode == "taproot":
# BIP341 signature hash
utxos = get(ctx, "utxos")
annex = get(ctx, "annex")
if get(ctx, "leaf") is not None:
codeseppos = get(ctx, "codeseppos")
leaf_ver = get(ctx, "leafversion")
script = get(ctx, "script_taproot")
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
else:
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
elif mode == "witv0":
# BIP143 signature hash
scriptcode = get(ctx, "scriptcode")
utxos = get(ctx, "utxos")
return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
else:
# Pre-segwit signature hash
scriptcode = get(ctx, "scriptcode")
return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
"""Default expression for "tweak": None if a leaf is specified, tap[0] otherwise."""
if get(ctx, "leaf") is None:
return get(ctx, "tap").tweak
return None
def default_key_tweaked(ctx):
"""Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
key = get(ctx, "key")
tweak = get(ctx, "tweak")
if tweak is None:
return key
else:
return tweak_add_privkey(key, tweak)
def default_signature(ctx):
"""Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
sighash = get(ctx, "sighash")
if get(ctx, "mode") == "taproot":
key = get(ctx, "key_tweaked")
flip_r = get(ctx, "flag_flip_r")
flip_p = get(ctx, "flag_flip_p")
return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p)
else:
key = get(ctx, "key")
return key.sign_ecdsa(sighash)
def default_hashtype_actual(ctx):
"""Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
hashtype = get(ctx, "hashtype")
mode = get(ctx, "mode")
if mode != "taproot":
return hashtype
idx = get(ctx, "idx")
tx = get(ctx, "tx")
if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
return (hashtype & ~3) | SIGHASH_NONE
return hashtype
def default_bytes_hashtype(ctx):
"""Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
def default_sign(ctx):
"""Default expression for "sign": concatenation of signature and bytes_hashtype."""
return get(ctx, "signature") + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
"""Default expression for "inputs_keypath": a signature."""
return [get(ctx, "sign")]
def default_witness_taproot(ctx):
"""Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
annex = get(ctx, "annex")
suffix_annex = []
if annex is not None:
suffix_annex = [annex]
if get(ctx, "leaf") is None:
return get(ctx, "inputs_keypath") + suffix_annex
else:
return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex
def default_witness_witv0(ctx):
"""Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
script = get(ctx, "script_witv0")
inputs = get(ctx, "inputs")
if script is None:
return inputs
else:
return inputs + [script]
def default_witness(ctx):
"""Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
mode = get(ctx, "mode")
if mode == "taproot":
return get(ctx, "witness_taproot")
elif mode == "witv0":
return get(ctx, "witness_witv0")
else:
return []
def default_scriptsig(ctx):
"""Default expression for "scriptsig", consisting of inputs and redeemscript, as needed."""
scriptsig = []
mode = get(ctx, "mode")
if mode == "legacy":
scriptsig = get(ctx, "inputs")
redeemscript = get(ctx, "script_p2sh")
if redeemscript is not None:
scriptsig += [bytes(redeemscript)]
return scriptsig
# The default context object.
DEFAULT_CONTEXT = {
# == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
# The overall witness stack, as a list of bytes objects.
"witness": default_witness,
# The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
"scriptsig": default_scriptsig,
# == Expressions you'll generally only override for intentionally invalid spends. ==
# The witness stack for spending a taproot output.
"witness_taproot": default_witness_taproot,
# The witness stack for spending a P2WPKH/P2WSH output.
"witness_witv0": default_witness_witv0,
# The script inputs for a taproot key path spend.
"inputs_keypath": default_inputs_keypath,
# The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
"hashtype_actual": default_hashtype_actual,
# The bytes object for a full signature (including hashtype byte, if needed).
"bytes_hashtype": default_bytes_hashtype,
# A full script signature (bytes including hashtype, if needed)
"sign": default_sign,
# An ECDSA or Schnorr signature (excluding hashtype byte).
"signature": default_signature,
# The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
"key_tweaked": default_key_tweaked,
# The tweak to use (None for script path spends, the actual tweak for key path spends).
"tweak": default_tweak,
# The sighash value (32 bytes)
"sighash": default_sighash,
# The information about the chosen script path spend (TaprootLeafInfo object).
"tapleaf": default_tapleaf,
# The script to push, and include in the sighash, for a taproot script path spend.
"script_taproot": default_script_taproot,
# The internal pubkey for a taproot script path spend (32 bytes).
"pubkey_internal": default_pubkey_internal,
# The negation flag of the internal pubkey for a taproot script path spend.
"negflag": default_negflag,
# The leaf version to include in the sighash (this does not affect the one in the control block).
"leafversion": default_leafversion,
# The Merkle path to include in the control block for a script path spend.
"merklebranch": default_merklebranch,
# The control block to push for a taproot script path spend.
"controlblock": default_controlblock,
# Whether to produce signatures with invalid P sign (Schnorr signatures only).
"flag_flip_p": False,
# Whether to produce signatures with invalid R sign (Schnorr signatures only).
"flag_flip_r": False,
# == Parameters that can be changed without invalidating, but do have a default: ==
# The hashtype (as an integer).
"hashtype": default_hashtype,
# The annex (only when mode=="taproot").
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
# The script to add to the witness in (if P2WSH; None implies P2WPKH)
"script_witv0": None,
# The leaf to use in taproot spends (if script path spend; None implies key path spend).
"leaf": None,
# The input arguments to provide to the executed script
"inputs": [],
# == Parameters to be set before evaluation: ==
# - mode: what spending style to use ("taproot", "witv0", or "legacy").
# - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
# - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
# - tx: the transaction to sign.
# - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
# - idx: the input position being signed.
# - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
ret = []
for elem in lst:
if isinstance(elem, list):
ret += flatten(elem)
else:
ret.append(elem)
return ret
def spend(tx, idx, utxos, **kwargs):
"""Sign transaction input idx of tx, provided utxos is the list of outputs being spent.
Additional arguments may be provided that override any aspect of the signing process.
See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
"""
ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}
def to_script(elem):
"""If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
if isinstance(elem, CScript):
return elem
else:
return CScript([elem])
scriptsig_list = flatten(get(ctx, "scriptsig"))
scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
witness_stack = flatten(get(ctx, "witness"))
return (scriptsig, witness_stack)
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
"""Helper for constructing Spender objects using the context signing framework.
* tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
* witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
* script: the actual script executed (for bare/P2WSH/P2SH spending)
* pkh: the public key for P2PKH or P2WPKH spending
* p2sh: whether the output is P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
* spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
* failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
* standard: whether the (valid version of) spending is expected to be standard
* err_msg: a string with an expected error message for failure (or None, if not cared about)
* sigops_weight: the pre-taproot sigops weight consumed by a successful spend
* need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding
transaction output.
"""
conf = dict()
# Compute scriptPubKey and set useful defaults based on the inputs.
if witv0:
assert tap is None
conf["mode"] = "witv0"
if pkh is not None:
# P2WPKH
assert script is None
pubkeyhash = hash160(pkh)
spk = key_to_p2wpkh_script(pkh)
conf["scriptcode"] = keyhash_to_p2pkh_script(pubkeyhash)
conf["script_witv0"] = None
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# P2WSH
spk = script_to_p2wsh_script(script)
conf["scriptcode"] = script
conf["script_witv0"] = script
else:
assert False
elif tap is None:
conf["mode"] = "legacy"
if pkh is not None:
# P2PKH
assert script is None
pubkeyhash = hash160(pkh)
spk = keyhash_to_p2pkh_script(pubkeyhash)
conf["scriptcode"] = spk
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# bare
spk = script
conf["scriptcode"] = script
else:
assert False
else:
assert script is None
conf["mode"] = "taproot"
conf["tap"] = tap
spk = tap.scriptPubKey
if spk_mutate_pre_p2sh is not None:
spk = spk_mutate_pre_p2sh(spk)
if p2sh:
# P2SH wrapper can be combined with anything else
conf["script_p2sh"] = spk
spk = script_to_p2sh_script(spk)
conf = {**conf, **kwargs}
def sat_fn(tx, idx, utxos, valid):
if valid:
return spend(tx, idx, utxos, **conf)
else:
assert failure is not None
return spend(tx, idx, utxos, **{**conf, **failure})
return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
"""Make a spender using make_spender, and add it to spenders."""
spenders.append(make_spender(*args, **kwargs))
# === Helpers for the test ===
def random_checksig_style(pubkey):
"""Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
if opcode == OP_CHECKSIGVERIFY:
ret = CScript([pubkey, opcode, OP_1])
elif opcode == OP_CHECKSIGADD:
num = random.choice([0, 0x7fffffff, -0x7fffffff])
ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
else:
ret = CScript([pubkey, opcode])
return bytes(ret)
def random_bytes(n):
"""Return a random bytes object of length n."""
return bytes(random.getrandbits(8) for i in range(n))
def bitflipper(expr):
"""Return a callable that evaluates expr and returns it with a random bitflip."""
def fn(ctx):
sub = deep_eval(ctx, expr)
assert isinstance(sub, bytes)
return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')
return fn
def zero_appender(expr):
"""Return a callable that evaluates expr and returns it with a zero added."""
return lambda ctx: deep_eval(ctx, expr) + b"\x00"
def byte_popper(expr):
"""Return a callable that evaluates expr and returns it with its last byte removed."""
return lambda ctx: deep_eval(ctx, expr)[:-1]
# Expected error strings
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
VALID_SIGHASHES_ECDSA = [
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_ALL,
SIGHASH_ANYONECANPAY + SIGHASH_NONE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
# These are primarily tested through the test vectors implemented in libsecp256k1, and in src/tests/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
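                # Editorial note: per BIP341 only native witness v1 programs of exactly 32 bytes are
                # taproot outputs; the other version/length combinations and P2SH wrappings exercised
                # here are not subject to taproot rules, so those spends are expected to be valid
                # (merely nonstandard), which is what the else branch below asserts.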
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
        lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'little'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
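    # Illustrative example (assuming taproot_construct in this test framework accepts callables as
    # described above): scripts = [("leaf", CScript([pubs[0], OP_CHECKSIG])), lambda h: h] would make
    # the "leaf" node's Merkle sibling equal to its own hash.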
    # Start with a tree that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
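    # (128 is the relevant limit: a BIP341 control block holds at most 128 branch hashes,
    # 33 + 32*128 = 4129 bytes, so a 129-deep leaf necessarily has an oversized control block.)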
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the internal pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
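    # The asserts above count the leading push-length byte; the numeric payload itself is 5 bytes
    # for 2**31 and 4 bytes for 2**31-1. OP_CHECKSIGADD only accepts numeric operands of up to
    # 4 bytes, which is why t27 below must fail while t29 succeeds.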
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
        # Test that 33-byte public keys (which are unknown) are acceptable but nonstandard regardless of signature validity (a bitflipped sighash is used), while normal 32-byte pubkeys reject such signatures.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
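                        # BIP342 gives each input a budget of 50 + (serialized witness size), and every
                        # executed signature check with a non-empty signature costs 50; e.g. 3 such checks
                        # need a witness of at least 3*50 - 50 = 100 bytes. Validity therefore requires
                        # witsize + 50 >= 50 * sigops, which is the check returned below.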
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
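                    # The standardness prediction below reflects assumed relay policy: using an annex,
                    # pushing witness stack items larger than 80 bytes, and using a non-32-byte
                    # ("unknown type") pubkey are each expected to make the spend nonstandard.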
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
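    # Background (BIP342): if an OP_SUCCESSx opcode is encountered while decoding a tapscript, the
    # script is unconditionally valid, even when it sits in an unexecuted branch or after OP_RETURN,
    # and even if the remainder of the script is undecodable or oversized; policy still treats such
    # spends as nonstandard (discouraged upgradable opcodes).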
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
"""Spenders for testing that pre-activation Taproot rules don't apply."""
spenders = []
sec = generate_privkey()
pub, _ = compute_xonly_pubkey(sec)
scripts = [
("pk", CScript([pub, OP_CHECKSIG])),
("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
]
tap = taproot_construct(pub, scripts)
# Test that keypath spending is valid & non-standard, regardless of validity.
add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])
# Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
spender = input_utxos[idx].spender
# Determine flags to dump
flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS
fields = [
("tx", tx.serialize().hex()),
("prevouts", [x.output.serialize().hex() for x in input_utxos]),
("index", idx),
("flags", flags),
("comment", spender.comment)
]
# The "final" field indicates that a spend should be always valid, even with more validation flags enabled
# than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
if spender.is_standard:
fields.append(("final", True))
def dump_witness(wit):
return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])
if success is not None:
fields.append(("success", dump_witness(success)))
if failure is not None:
fields.append(("failure", dump_witness(failure)))
# Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the
# file naming scheme compatible with fuzzing infrastructure).
dump = json.dumps(OrderedDict(fields)) + ",\n"
sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
os.makedirs(dirname, exist_ok=True)
with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
f.write(dump)
# Data type to keep track of UTXOs, where they were created, and how to spend them.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(DevcoinTestFramework):
def add_options(self, parser):
parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
parser.add_argument("--previous_release", dest="previous_release", default=False, action="store_true",
help="Use a previous release as taproot-inactive node")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
if self.options.previous_release:
self.skip_if_no_previous_releases()
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Node 0 has Taproot inactive, Node 1 active.
self.extra_args = [["-par=1"], ["-par=1"]]
if self.options.previous_release:
self.wallet_names = [None, self.default_wallet_name]
else:
self.extra_args[0].append("-vbparams=taproot:1:1")
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args, versions=[
200100 if self.options.previous_release else None,
None,
])
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
# Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
# It is not impossible to fit enough tapscript sigops to hit the old 80k limit without
# busting txin-level limits. We simply have to account for the p2pk outputs in all
# transactions.
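        # (Each bare OP_CHECKSIG in this padding output counts as one legacy sigop, and legacy sigops
        # are scaled by WITNESS_SCALE_FACTOR towards MAX_BLOCK_SIGOPS_WEIGHT, hence the division.)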
extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
block_response = node.submitblock(block.serialize().hex())
if err_msg is not None:
assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
if accept:
assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
"""Run randomized tests with a number of "spenders".
Steps:
1) Generate an appropriate UTXO for each spender to test spend conditions
            2) Generate 16 random addresses across all wallet types: legacy (pkh), p2sh-segwit (sh_wpkh), bech32 (wpkh)
3) Select random number of inputs from (1)
4) Select random number of addresses from (2) as outputs
Each spender embodies a test; in a large randomized test, it is verified
that toggling the valid argument to each lambda toggles the validity of
the transaction. This is accomplished by constructing transactions consisting
of all valid inputs, except one invalid one.
"""
# Construct a bunch of sPKs that send coins back to the host wallet
self.log.info("- Constructing addresses for returning coins")
host_spks = []
host_pubkeys = []
for i in range(16):
addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
info = node.getaddressinfo(addr)
spk = bytes.fromhex(info['scriptPubKey'])
host_spks.append(spk)
host_pubkeys.append(bytes.fromhex(info['pubkey']))
# Initialize variables used by block_submit().
self.lastblockhash = node.getbestblockhash()
self.tip = int(self.lastblockhash, 16)
block = node.getblock(self.lastblockhash)
self.lastblockheight = block['height']
self.lastblocktime = block['time']
# Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
# one change output at the end. The transaction is constructed on the Python side to enable
# having multiple outputs to the same address and outputs with no assigned address. The wallet
# is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
# Python side (to bypass standardness rules).
self.log.info("- Creating test UTXOs...")
random.shuffle(spenders)
normal_utxos = []
mismatching_utxos = [] # UTXOs with input that requires mismatching output position
done = 0
while done < len(spenders):
# Compute how many UTXOs to create with this transaction
count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
fund_tx = CTransaction()
# Add the 50 highest-value inputs
unspents = node.listunspent()
random.shuffle(unspents)
unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
if len(unspents) > 50:
unspents = unspents[:50]
random.shuffle(unspents)
balance = 0
for unspent in unspents:
balance += int(unspent["amount"] * 100000000)
txid = int(unspent["txid"], 16)
fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
# Add outputs
cur_progress = done / len(spenders)
next_progress = (done + count_this_tx) / len(spenders)
change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
for i in range(count_this_tx):
avg = (balance - change_goal) / (count_this_tx - i)
amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
balance -= amount
fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
# Add change
fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
# Ask the wallet to sign
ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"]))
fund_tx.deserialize(ss)
# Construct UTXOData entries
fund_tx.rehash()
for i in range(count_this_tx):
utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
if utxodata.spender.need_vin_vout_mismatch:
mismatching_utxos.append(utxodata)
else:
normal_utxos.append(utxodata)
done += 1
# Mine into a block
self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)
# Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
self.log.info("- Running %i spending tests" % done)
random.shuffle(normal_utxos)
random.shuffle(mismatching_utxos)
assert done == len(normal_utxos) + len(mismatching_utxos)
left = done
while left:
# Construct CTransaction with random nVersion, nLocktime
tx = CTransaction()
tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
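            # (Bit 31 of nSequence is the BIP68 disable flag; relative locktimes only apply to
            # transactions with nVersion >= 2, so version-0/1 transactions need no such minimum.)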
if random.choice([True, False]):
tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
else:
tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
# Decide how many UTXOs to test with.
acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
num_inputs = random.choice(acceptable)
# If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
# unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
# normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
input_utxos = []
while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
input_utxos.append(mismatching_utxos.pop())
left -= 1
# Top up until we hit num_inputs (but include at least one normal UTXO always).
for _ in range(max(1, num_inputs - len(input_utxos))):
input_utxos.append(normal_utxos.pop())
left -= 1
# The first input cannot require a mismatching output (as there is at least one output).
while True:
random.shuffle(input_utxos)
if not input_utxos[0].spender.need_vin_vout_mismatch:
break
first_mismatch_input = None
for i in range(len(input_utxos)):
if input_utxos[i].spender.need_vin_vout_mismatch:
first_mismatch_input = i
assert first_mismatch_input is None or first_mismatch_input > 0
# Decide fee, and add CTxIns to tx.
amount = sum(utxo.output.nValue for utxo in input_utxos)
fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee
in_value = amount - fee
tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
# Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
for i in range(num_outputs):
tx.vout.append(CTxOut())
if in_value <= DUST_LIMIT:
tx.vout[-1].nValue = DUST_LIMIT
elif i < num_outputs - 1:
tx.vout[-1].nValue = in_value
else:
tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
in_value -= tx.vout[-1].nValue
tx.vout[-1].scriptPubKey = random.choice(host_spks)
sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
fee += in_value
assert fee >= 0
# Select coinbase pubkey
cb_pubkey = random.choice(host_pubkeys)
sigops_weight += 1 * WITNESS_SCALE_FACTOR
# Precompute one satisfying and one failing scriptSig/witness for each input.
input_data = []
for i in range(len(input_utxos)):
fn = input_utxos[i].spender.sat_function
fail = None
success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
if not input_utxos[i].spender.no_fail:
fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
input_data.append((fail, success))
if self.options.dump_tests:
dump_json_test(tx, input_utxos, i, success, fail)
# Sign each input incorrectly once on each complete signing pass, except the very last.
for fail_input in list(range(len(input_utxos))) + [None]:
# Skip trying to fail at spending something that can't be made to fail.
if fail_input is not None and input_utxos[fail_input].spender.no_fail:
continue
                # Expected message with each input failure, may be None (which is ignored)
expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
# Fill inputs/witnesses
for i in range(len(input_utxos)):
tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
# Submit to mempool to check standardness
is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
tx.rehash()
msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
if is_standard_tx:
node.sendrawtransaction(tx.serialize().hex(), 0)
assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
else:
assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
# Submit in a block
self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
self.log.info(" - %i tests done" % (len(spenders) - left))
assert left == 0
assert len(normal_utxos) == 0
assert len(mismatching_utxos) == 0
self.log.info(" - Done")
def run_test(self):
# Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
self.log.info("Post-activation tests...")
self.nodes[1].generate(COINBASE_MATURITY + 1)
self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
# Re-connect nodes in case they have been disconnected
self.disconnect_nodes(0, 1)
self.connect_nodes(0, 1)
# Transfer value of the largest 500 coins to pre-taproot node.
addr = self.nodes[0].getnewaddress()
unsp = self.nodes[1].listunspent()
unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
unsp = unsp[:500]
rawtx = self.nodes[1].createrawtransaction(
inputs=[{
'txid': i['txid'],
'vout': i['vout']
} for i in unsp],
outputs={addr: sum(i['amount'] for i in unsp)}
)
rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
# Mine a block with the transaction
block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
self.sync_blocks()
# Pre-taproot activation tests.
self.log.info("Pre-activation tests...")
# Run each test twice; once in isolation, and once combined with others. Testing in isolation
# means that the standardness is verified in every test (as combined transactions are only standard
# when all their inputs are standard).
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
if __name__ == '__main__':
TaprootTest().main()
|
the-stack_0_11727 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
"""
__docformat__ = 'restructuredtext'
|
the-stack_0_11729 | import itertools
import logging
from collections import defaultdict, deque
from BaseClasses import DoorType
from Regions import dungeon_events
from Dungeons import dungeon_keys, dungeon_bigs
from DungeonGenerator import ExplorationState, special_big_key_doors
class KeyLayout(object):
def __init__(self, sector, starts, proposal):
self.sector = sector
self.start_regions = starts
self.proposal = proposal
self.key_logic = KeyLogic(sector.name)
self.key_counters = None
self.flat_prop = None
self.max_chests = None
self.max_drops = None
self.all_chest_locations = {}
self.big_key_special = False
self.all_locations = set()
self.item_locations = set()
# bk special?
        # bk required? True if big chests or big doors exist
def reset(self, proposal, builder, world, player):
self.proposal = proposal
self.flat_prop = flatten_pair_list(self.proposal)
self.key_logic = KeyLogic(self.sector.name)
self.max_chests = calc_max_chests(builder, self, world, player)
self.all_locations = set()
self.item_locations = set()
class KeyLogic(object):
def __init__(self, dungeon_name):
self.door_rules = {}
self.bk_restricted = set() # subset of free locations
self.bk_locked = set() # includes potentially other locations and key only locations
self.sm_restricted = set()
self.small_key_name = dungeon_keys[dungeon_name]
self.bk_name = dungeon_bigs[dungeon_name]
self.bk_doors = set()
self.bk_chests = set()
self.logic_min = {}
self.logic_max = {}
self.placement_rules = []
self.location_rules = {}
self.outside_keys = 0
self.dungeon = dungeon_name
def check_placement(self, unplaced_keys, big_key_loc=None):
for rule in self.placement_rules:
if not rule.is_satisfiable(self.outside_keys, unplaced_keys):
return False
if big_key_loc:
for rule_a, rule_b in itertools.combinations(self.placement_rules, 2):
if rule_a.contradicts(rule_b, unplaced_keys, big_key_loc):
return False
return True
class DoorRules(object):
def __init__(self, number, is_valid):
self.small_key_num = number
self.is_valid = is_valid
# allowing a different number if bk is behind this door in a set of locations
self.alternate_small_key = None
self.alternate_big_key_loc = set()
# for a place with only 1 free location/key_only_location behind it ... no goals and locations
self.allow_small = False
self.small_location = None
self.opposite = None
class LocationRule(object):
def __init__(self):
self.small_key_num = 0
self.conditional_sets = []
class ConditionalLocationRule(object):
def __init__(self, conditional_set):
self.conditional_set = conditional_set
self.small_key_num = 0
class PlacementRule(object):
def __init__(self):
self.door_reference = None
self.small_key = None
        self.bk_conditional_set = None  # locations that, if one of them holds the big key, switch this rule to its "without big key" requirements
self.needed_keys_w_bk = None
self.needed_keys_wo_bk = None
self.check_locations_w_bk = None
self.check_locations_wo_bk = None
self.bk_relevant = True
self.key_reduced = False
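    # Editorial note: contradicts() below is essentially a pigeonhole check: given a candidate big
    # key location, two rules conflict when their combined key thresholds cannot be met from the
    # locations they share plus the small keys still left to place.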
def contradicts(self, rule, unplaced_keys, big_key_loc):
bk_blocked = big_key_loc in self.bk_conditional_set if self.bk_conditional_set else False
rule_blocked = big_key_loc in rule.bk_conditional_set if rule.bk_conditional_set else False
check_locations = self.check_locations_wo_bk if bk_blocked else self.check_locations_w_bk
rule_locations = rule.check_locations_wo_bk if rule_blocked else rule.check_locations_w_bk
if check_locations is None or rule_locations is None:
return False
check_locations = check_locations - {big_key_loc}
rule_locations = rule_locations - {big_key_loc}
threshold = self.needed_keys_wo_bk if bk_blocked else self.needed_keys_w_bk
rule_threshold = rule.needed_keys_wo_bk if rule_blocked else rule.needed_keys_w_bk
common_locations = rule_locations & check_locations
shared = len(common_locations)
if min(rule_threshold, threshold) - shared > 0:
left = unplaced_keys - shared
check_locations = check_locations - common_locations
check_needed = threshold - shared
if len(check_locations) < check_needed or left < check_needed:
return True
else:
left -= check_needed
rule_locations = rule_locations - common_locations
rule_needed = rule_threshold - shared
if len(rule_locations) < rule_needed or left < rule_needed:
return True
else:
left -= rule_needed
return False
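    # is_satisfiable(): count the keys this rule can draw on (keys held outside the dungeon, keys
    # already placed in its check locations, and keys that could still be placed in its empty
    # chests) and compare against the rule's threshold.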
def is_satisfiable(self, outside_keys, unplaced_keys):
bk_blocked = False
if self.bk_conditional_set:
for loc in self.bk_conditional_set:
if loc.item and loc.item.bigkey:
bk_blocked = True
break
check_locations = self.check_locations_wo_bk if bk_blocked else self.check_locations_w_bk
if not bk_blocked and check_locations is None:
return True
available_keys = outside_keys
empty_chests = 0
threshold = self.needed_keys_wo_bk if bk_blocked else self.needed_keys_w_bk
for loc in check_locations:
if not loc.item:
empty_chests += 1
elif loc.item and loc.item.name == self.small_key:
available_keys += 1
place_able_keys = min(empty_chests, unplaced_keys)
available_keys += place_able_keys
return available_keys >= threshold
class KeyCounter(object):
def __init__(self, max_chests):
self.max_chests = max_chests
self.free_locations = {}
self.key_only_locations = {}
self.child_doors = {}
self.open_doors = {}
self.used_keys = 0
self.big_key_opened = False
self.important_location = False
self.other_locations = {}
self.important_locations = {}
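    # used_smalls_loc(): of the keys spent in this state (plus an optional reserve), how many must
    # have come from regular locations rather than key-only drops; clamped at zero.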
def used_smalls_loc(self, reserve=0):
return max(self.used_keys + reserve - len(self.key_only_locations), 0)
def build_key_layout(builder, start_regions, proposal, world, player):
key_layout = KeyLayout(builder.master_sector, start_regions, proposal)
key_layout.flat_prop = flatten_pair_list(key_layout.proposal)
key_layout.max_drops = count_key_drops(key_layout.sector)
key_layout.max_chests = calc_max_chests(builder, key_layout, world, player)
key_layout.big_key_special = check_bk_special(key_layout.sector.region_set(), world, player)
key_layout.all_locations = find_all_locations(key_layout.sector)
return key_layout
def count_key_drops(sector):
cnt = 0
for region in sector.regions:
for loc in region.locations:
if loc.forced_item and 'Small Key' in loc.item.name:
cnt += 1
return cnt
def find_all_locations(sector):
all_locations = set()
for region in sector.regions:
for loc in region.locations:
all_locations.add(loc)
return all_locations
def calc_max_chests(builder, key_layout, world, player):
if world.doorShuffle[player] != 'crossed':
return len(world.get_dungeon(key_layout.sector.name, player).small_keys)
return max(0, builder.key_doors_num - key_layout.max_drops)
def analyze_dungeon(key_layout, world, player):
key_layout.key_counters = create_key_counters(key_layout, world, player)
key_logic = key_layout.key_logic
find_bk_locked_sections(key_layout, world, player)
key_logic.bk_chests.update(find_big_chest_locations(key_layout.all_chest_locations))
key_logic.bk_chests.update(find_big_key_locked_locations(key_layout.all_chest_locations))
if world.retro[player] and world.mode[player] != 'standard':
return
original_key_counter = find_counter({}, False, key_layout)
queue = deque([(None, original_key_counter)])
doors_completed = set()
visited_cid = set()
visited_cid.add(cid(original_key_counter, key_layout))
while len(queue) > 0:
queue = deque(sorted(queue, key=queue_sorter))
parent_door, key_counter = queue.popleft()
chest_keys = available_chest_small_keys(key_counter, world, player)
raw_avail = chest_keys + len(key_counter.key_only_locations)
available = raw_avail - key_counter.used_keys
possible_smalls = count_unique_small_doors(key_counter, key_layout.flat_prop)
avail_bigs = exist_relevant_big_doors(key_counter, key_layout) or exist_big_chest(key_counter)
non_big_locs = count_locations_big_optional(key_counter.free_locations)
big_avail = key_counter.big_key_opened or (key_layout.big_key_special and any(x for x in key_counter.other_locations.keys() if x.forced_item and x.forced_item.bigkey))
if not big_avail:
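            # Roughly: if every reachable non-big-chest location would need to hold a small key just
            # to get through the reachable key doors, and no big door or big chest is reachable yet,
            # then placing the big key in one of those chests could soft-lock, so restrict them.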
if chest_keys == non_big_locs and chest_keys > 0 and available <= possible_smalls and not avail_bigs:
key_logic.bk_restricted.update(filter_big_chest(key_counter.free_locations))
# try to relax the rules here? - smallest requirement that doesn't force a softlock
child_queue = deque()
for child in key_counter.child_doors.keys():
if not child.bigKey or not key_layout.big_key_special or big_avail:
odd_counter = create_odd_key_counter(child, key_counter, key_layout, world, player)
empty_flag = empty_counter(odd_counter)
child_queue.append((child, odd_counter, empty_flag))
while len(child_queue) > 0:
child, odd_counter, empty_flag = child_queue.popleft()
if not child.bigKey and child not in doors_completed:
best_counter = find_best_counter(child, odd_counter, key_counter, key_layout, world, player, False, empty_flag)
rule = create_rule(best_counter, key_counter, key_layout, world, player)
check_for_self_lock_key(rule, child, best_counter, key_layout, world, player)
bk_restricted_rules(rule, child, odd_counter, empty_flag, key_counter, key_layout, world, player)
key_logic.door_rules[child.name] = rule
doors_completed.add(child)
next_counter = find_next_counter(child, key_counter, key_layout)
ctr_id = cid(next_counter, key_layout)
if ctr_id not in visited_cid:
queue.append((child, next_counter))
visited_cid.add(ctr_id)
check_rules(original_key_counter, key_layout, world, player)
# Flip bk rules if more restrictive, to prevent placing a big key in a softlocking location
for rule in key_logic.door_rules.values():
if rule.alternate_small_key is not None and rule.alternate_small_key > rule.small_key_num:
max_counter = find_max_counter(key_layout)
rule.alternate_big_key_loc = set(max_counter.free_locations.keys()).difference(rule.alternate_big_key_loc)
rule.small_key_num, rule.alternate_small_key = rule.alternate_small_key, rule.small_key_num
create_exhaustive_placement_rules(key_layout, world, player)
set_paired_rules(key_logic, world, player)
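# Emits PlacementRules: for each counter state that still blocks some locations, records how
# many small keys must be placed among the currently accessible locations, tracked separately
# for the cases with and without the big key in hand.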
def create_exhaustive_placement_rules(key_layout, world, player):
key_logic = key_layout.key_logic
max_ctr = find_max_counter(key_layout)
for code, key_counter in key_layout.key_counters.items():
accessible_loc = set()
accessible_loc.update(key_counter.free_locations)
accessible_loc.update(key_counter.key_only_locations)
blocked_loc = key_layout.item_locations.difference(accessible_loc)
valid_rule = True
# min_keys = max(count_unique_sm_doors(key_counter.child_doors), key_counter.used_keys + 1)
min_keys = key_counter.used_keys + 1
if len(blocked_loc) > 0 and len(key_counter.key_only_locations) < min_keys:
rule = PlacementRule()
rule.door_reference = code
rule.small_key = key_logic.small_key_name
if key_counter.big_key_opened or not big_key_progress(key_counter):
rule.needed_keys_w_bk = min_keys
rule.bk_relevant = key_counter.big_key_opened
if key_counter.big_key_opened and rule.needed_keys_w_bk + 1 > len(accessible_loc):
valid_rule = False # indicates that the big key cannot be in the accessible locations
key_logic.bk_restricted.update(accessible_loc.difference(max_ctr.key_only_locations))
else:
placement_self_lock_adjustment(rule, max_ctr, blocked_loc, key_counter, world, player)
rule.check_locations_w_bk = accessible_loc
check_sm_restriction_needed(key_layout, max_ctr, rule, blocked_loc)
else:
if big_key_progress(key_counter) and only_sm_doors(key_counter):
create_inclusive_rule(key_layout, max_ctr, code, key_counter, blocked_loc, accessible_loc, min_keys, world, player)
rule.bk_conditional_set = blocked_loc
rule.needed_keys_wo_bk = min_keys
rule.check_locations_wo_bk = set(filter_big_chest(accessible_loc))
if valid_rule:
key_logic.placement_rules.append(rule)
adjust_locations_rules(key_logic, rule, accessible_loc, key_layout, key_counter, max_ctr)
refine_placement_rules(key_layout, max_ctr)
refine_location_rules(key_layout)
def placement_self_lock_adjustment(rule, max_ctr, blocked_loc, ctr, world, player):
if len(blocked_loc) == 1 and world.accessibility[player] != 'locations':
blocked_others = set(max_ctr.other_locations).difference(set(ctr.other_locations))
important_found = False
for loc in blocked_others:
if important_location(loc, world, player):
important_found = True
break
if not important_found:
rule.needed_keys_w_bk -= 1
def check_sm_restriction_needed(key_layout, max_ctr, rule, blocked):
if rule.needed_keys_w_bk == key_layout.max_chests + len(max_ctr.key_only_locations):
key_layout.key_logic.sm_restricted.update(blocked.difference(max_ctr.key_only_locations))
return True
return False
def adjust_locations_rules(key_logic, rule, accessible_loc, key_layout, key_counter, max_ctr):
if rule.bk_conditional_set:
test_set = (rule.bk_conditional_set - key_logic.bk_locked) - set(max_ctr.key_only_locations.keys())
needed = rule.needed_keys_wo_bk if test_set else 0
else:
test_set = None
needed = rule.needed_keys_w_bk
if needed > 0:
all_accessible = set(accessible_loc)
all_accessible.update(key_counter.other_locations)
blocked_loc = key_layout.all_locations-all_accessible
for location in blocked_loc:
if location not in key_logic.location_rules.keys():
loc_rule = LocationRule()
key_logic.location_rules[location] = loc_rule
else:
loc_rule = key_logic.location_rules[location]
if test_set:
if location not in key_logic.bk_locked:
cond_rule = None
for other in loc_rule.conditional_sets:
if other.conditional_set == test_set:
cond_rule = other
break
if not cond_rule:
cond_rule = ConditionalLocationRule(test_set)
loc_rule.conditional_sets.append(cond_rule)
cond_rule.small_key_num = max(needed, cond_rule.small_key_num)
else:
loc_rule.small_key_num = max(needed, loc_rule.small_key_num)
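# Iteratively simplifies placement rules: strips restricted and key-only locations from the
# check sets, drops rules that become trivial or impossible, merges equivalent rules, and
# records any new big-key restrictions discovered along the way.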
def refine_placement_rules(key_layout, max_ctr):
key_logic = key_layout.key_logic
changed = True
while changed:
changed = False
rules_to_remove = []
for rule in key_logic.placement_rules:
if rule.check_locations_w_bk:
rule.check_locations_w_bk.difference_update(key_logic.sm_restricted)
key_onlys = rule.check_locations_w_bk.intersection(max_ctr.key_only_locations)
if len(key_onlys) > 0:
rule.check_locations_w_bk.difference_update(key_onlys)
rule.needed_keys_w_bk -= len(key_onlys)
if rule.needed_keys_w_bk == 0:
rules_to_remove.append(rule)
# todo: evaluate this usage
# if rule.bk_relevant and len(rule.check_locations_w_bk) == rule.needed_keys_w_bk + 1:
# new_restricted = set(max_ctr.free_locations) - rule.check_locations_w_bk
# if len(new_restricted | key_logic.bk_restricted) < len(key_layout.all_chest_locations):
# if len(new_restricted - key_logic.bk_restricted) > 0:
# key_logic.bk_restricted.update(new_restricted) # bk must be in one of the check_locations
# changed = True
# else:
# rules_to_remove.append(rule)
# changed = True
if rule.needed_keys_w_bk > key_layout.max_chests or len(rule.check_locations_w_bk) < rule.needed_keys_w_bk:
                    logging.getLogger('').warning('Invalid placement rule for %s: requires more keys than can be placed', rule.door_reference)
rules_to_remove.append(rule)
changed = True
if rule.bk_conditional_set is not None:
rule.bk_conditional_set.difference_update(key_logic.bk_restricted)
rule.bk_conditional_set.difference_update(max_ctr.key_only_locations)
if len(rule.bk_conditional_set) == 0:
rules_to_remove.append(rule)
if rule.check_locations_wo_bk:
rule.check_locations_wo_bk.difference_update(key_logic.sm_restricted)
key_onlys = rule.check_locations_wo_bk.intersection(max_ctr.key_only_locations)
if len(key_onlys) > 0:
rule.check_locations_wo_bk.difference_update(key_onlys)
rule.needed_keys_wo_bk -= len(key_onlys)
if rule.needed_keys_wo_bk == 0:
rules_to_remove.append(rule)
if len(rule.check_locations_wo_bk) < rule.needed_keys_wo_bk or rule.needed_keys_wo_bk > key_layout.max_chests:
if len(rule.bk_conditional_set) > 0:
key_logic.bk_restricted.update(rule.bk_conditional_set)
rules_to_remove.append(rule)
changed = True # impossible for bk to be here, I think
for rule_a, rule_b in itertools.combinations([x for x in key_logic.placement_rules if x not in rules_to_remove], 2):
if rule_b.bk_conditional_set and rule_a.check_locations_w_bk:
temp = rule_a
rule_a = rule_b
rule_b = temp
if rule_a.bk_conditional_set and rule_b.check_locations_w_bk:
common_needed = min(rule_a.needed_keys_wo_bk, rule_b.needed_keys_w_bk)
if len(rule_b.check_locations_w_bk & rule_a.check_locations_wo_bk) < common_needed:
key_logic.bk_restricted.update(rule_a.bk_conditional_set)
rules_to_remove.append(rule_a)
changed = True
break
equivalent_rules = []
for rule in key_logic.placement_rules:
for rule2 in key_logic.placement_rules:
if rule != rule2:
if rule.check_locations_w_bk and rule2.check_locations_w_bk:
if rule2.check_locations_w_bk == rule.check_locations_w_bk and rule2.needed_keys_w_bk > rule.needed_keys_w_bk:
rules_to_remove.append(rule)
elif rule2.needed_keys_w_bk == rule.needed_keys_w_bk and rule2.check_locations_w_bk < rule.check_locations_w_bk:
rules_to_remove.append(rule)
elif rule2.check_locations_w_bk == rule.check_locations_w_bk and rule2.needed_keys_w_bk == rule.needed_keys_w_bk:
equivalent_rules.append((rule, rule2))
if rule.check_locations_wo_bk and rule2.check_locations_wo_bk and rule.bk_conditional_set == rule2.bk_conditional_set:
if rule2.check_locations_wo_bk == rule.check_locations_wo_bk and rule2.needed_keys_wo_bk > rule.needed_keys_wo_bk:
rules_to_remove.append(rule)
elif rule2.needed_keys_wo_bk == rule.needed_keys_wo_bk and rule2.check_locations_wo_bk < rule.check_locations_wo_bk:
rules_to_remove.append(rule)
elif rule2.check_locations_wo_bk == rule.check_locations_wo_bk and rule2.needed_keys_wo_bk == rule.needed_keys_wo_bk:
equivalent_rules.append((rule, rule2))
if len(rules_to_remove) > 0:
key_logic.placement_rules = [x for x in key_logic.placement_rules if x not in rules_to_remove]
equivalent_rules = [x for x in equivalent_rules if x[0] not in rules_to_remove and x[1] not in rules_to_remove]
if len(equivalent_rules) > 0:
removed_rules = {}
for r1, r2 in equivalent_rules:
if r1 in removed_rules.keys():
r1 = removed_rules[r1]
if r2 in removed_rules.keys():
r2 = removed_rules[r2]
if r1 != r2:
r1.door_reference += ','+r2.door_reference
key_logic.placement_rules.remove(r2)
removed_rules[r2] = r1
def refine_location_rules(key_layout):
locs_to_remove = []
for loc, rule in key_layout.key_logic.location_rules.items():
conditions_to_remove = []
for cond_rule in rule.conditional_sets:
if cond_rule.small_key_num <= rule.small_key_num:
conditions_to_remove.append(cond_rule)
rule.conditional_sets = [x for x in rule.conditional_sets if x not in conditions_to_remove]
if rule.small_key_num == 0 and len(rule.conditional_sets) == 0:
locs_to_remove.append(loc)
for loc in locs_to_remove:
del key_layout.key_logic.location_rules[loc]
def create_inclusive_rule(key_layout, max_ctr, code, key_counter, blocked_loc, accessible_loc, min_keys, world, player):
key_logic = key_layout.key_logic
rule = PlacementRule()
rule.door_reference = code
rule.small_key = key_logic.small_key_name
rule.needed_keys_w_bk = min_keys
if key_counter.big_key_opened and rule.needed_keys_w_bk + 1 > len(accessible_loc):
# indicates that the big key cannot be in the accessible locations
key_logic.bk_restricted.update(accessible_loc.difference(max_ctr.key_only_locations))
else:
placement_self_lock_adjustment(rule, max_ctr, blocked_loc, key_counter, world, player)
rule.check_locations_w_bk = accessible_loc
check_sm_restriction_needed(key_layout, max_ctr, rule, blocked_loc)
key_logic.placement_rules.append(rule)
adjust_locations_rules(key_logic, rule, accessible_loc, key_layout, key_counter, max_ctr)
def queue_sorter(queue_item):
door, counter = queue_item
if door is None:
return 0
return 1 if door.bigKey else 0
def queue_sorter_2(queue_item):
door, counter, key_only = queue_item
if door is None:
return 0
return 1 if door.bigKey else 0
def find_bk_locked_sections(key_layout, world, player):
key_counters = key_layout.key_counters
key_logic = key_layout.key_logic
bk_not_required = set()
big_chest_allowed_big_key = world.accessibility[player] != 'locations'
for counter in key_counters.values():
key_layout.all_chest_locations.update(counter.free_locations)
key_layout.item_locations.update(counter.free_locations)
key_layout.item_locations.update(counter.key_only_locations)
key_layout.all_locations.update(key_layout.item_locations)
key_layout.all_locations.update(counter.other_locations)
if counter.big_key_opened and counter.important_location:
big_chest_allowed_big_key = False
if not counter.big_key_opened:
bk_not_required.update(counter.free_locations)
bk_not_required.update(counter.key_only_locations)
bk_not_required.update(counter.other_locations)
# todo?: handle bk special differently in cross dungeon
# notably: things behind bk doors - relying on the bk door logic atm
if not key_layout.big_key_special:
key_logic.bk_restricted.update(dict.fromkeys(set(key_layout.all_chest_locations).difference(bk_not_required)))
key_logic.bk_locked.update(dict.fromkeys(set(key_layout.all_locations) - bk_not_required))
if not big_chest_allowed_big_key:
bk_required_locations = find_big_chest_locations(key_layout.all_chest_locations)
bk_required_locations += find_big_key_locked_locations(key_layout.all_chest_locations)
key_logic.bk_restricted.update(bk_required_locations)
key_logic.bk_locked.update(bk_required_locations)
def empty_counter(counter):
if len(counter.key_only_locations) != 0 or len(counter.free_locations) != 0 or len(counter.child_doors) != 0:
return False
return not counter.important_location
def relative_empty_counter(odd_counter, key_counter):
if len(set(odd_counter.key_only_locations).difference(key_counter.key_only_locations)) > 0:
return False
if len(set(odd_counter.free_locations).difference(key_counter.free_locations)) > 0:
return False
# important only
if len(set(odd_counter.important_locations).difference(key_counter.important_locations)) > 0:
return False
new_child_door = False
for child in odd_counter.child_doors:
if unique_child_door(child, key_counter):
new_child_door = True
break
if new_child_door:
return False
return True
def relative_empty_counter_2(odd_counter, key_counter):
if len(set(odd_counter.key_only_locations).difference(key_counter.key_only_locations)) > 0:
return False
if len(set(odd_counter.free_locations).difference(key_counter.free_locations)) > 0:
return False
# important only
if len(set(odd_counter.important_locations).difference(key_counter.important_locations)) > 0:
return False
for child in odd_counter.child_doors:
if unique_child_door_2(child, key_counter):
return False
return True
def progressive_ctr(new_counter, last_counter):
if len(set(new_counter.key_only_locations).difference(last_counter.key_only_locations)) > 0:
return True
if len(set(new_counter.free_locations).difference(last_counter.free_locations)) > 0:
return True
for child in new_counter.child_doors:
if unique_child_door_2(child, last_counter):
return True
return False
def unique_child_door(child, key_counter):
if child in key_counter.child_doors or child.dest in key_counter.child_doors:
return False
if child in key_counter.open_doors or child.dest in key_counter.open_doors:
return False
if child.bigKey and key_counter.big_key_opened:
return False
return True
def unique_child_door_2(child, key_counter):
if child in key_counter.child_doors or child.dest in key_counter.child_doors:
return False
if child in key_counter.open_doors or child.dest in key_counter.open_doors:
return False
return True
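# Greedily opens doors that provably waste keys (see key_wasted) to reach the most
# key-hungry state from which `door` could still be opened; used to size its door rule.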
def find_best_counter(door, odd_counter, key_counter, key_layout, world, player, skip_bk, empty_flag): # try to waste as many keys as possible?
    ignored_doors = {door, door.dest} if door is not None else set()  # set, not dict: .add() is used below
finished = False
opened_doors = dict(key_counter.open_doors)
bk_opened = key_counter.big_key_opened
# new_counter = key_counter
last_counter = key_counter
while not finished:
door_set = find_potential_open_doors(last_counter, ignored_doors, key_layout, skip_bk)
if door_set is None or len(door_set) == 0:
finished = True
continue
for new_door in door_set:
proposed_doors = {**opened_doors, **dict.fromkeys([new_door, new_door.dest])}
bk_open = bk_opened or new_door.bigKey
new_counter = find_counter(proposed_doors, bk_open, key_layout)
bk_open = new_counter.big_key_opened
# this means the new_door invalidates the door / leads to the same stuff
if not empty_flag and relative_empty_counter(odd_counter, new_counter):
ignored_doors.add(new_door)
elif empty_flag or key_wasted(new_door, door, last_counter, new_counter, key_layout, world, player):
last_counter = new_counter
opened_doors = proposed_doors
bk_opened = bk_open
else:
ignored_doors.add(new_door)
return last_counter
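# Variant of find_best_counter used by the placement rules: opens doors freely, but appears
# to skip small doors that would already expose everything behind the target door.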
def find_worst_counter(door, odd_counter, key_counter, key_layout, skip_bk): # try to waste as many keys as possible?
    ignored_doors = {door, door.dest} if door is not None else set()  # set, not dict: .add() is used below
finished = False
opened_doors = dict(key_counter.open_doors)
bk_opened = key_counter.big_key_opened
# new_counter = key_counter
last_counter = key_counter
while not finished:
door_set = find_potential_open_doors(last_counter, ignored_doors, key_layout, skip_bk, 0)
if door_set is None or len(door_set) == 0:
finished = True
continue
for new_door in door_set:
proposed_doors = {**opened_doors, **dict.fromkeys([new_door, new_door.dest])}
bk_open = bk_opened or new_door.bigKey
new_counter = find_counter(proposed_doors, bk_open, key_layout)
bk_open = new_counter.big_key_opened
if not new_door.bigKey and progressive_ctr(new_counter, last_counter) and relative_empty_counter_2(odd_counter, new_counter):
ignored_doors.add(new_door)
else:
last_counter = new_counter
opened_doors = proposed_doors
bk_opened = bk_open
return last_counter
def find_potential_open_doors(key_counter, ignored_doors, key_layout, skip_bk, reserve=1):
small_doors = []
big_doors = []
if key_layout.big_key_special:
big_key_available = any(x for x in key_counter.other_locations.keys() if x.forced_item and x.forced_item.bigkey)
else:
big_key_available = len(key_counter.free_locations) - key_counter.used_smalls_loc(reserve) > 0
for other in key_counter.child_doors:
if other not in ignored_doors and other.dest not in ignored_doors:
if other.bigKey:
if not skip_bk and (not key_layout.big_key_special or big_key_available):
big_doors.append(other)
elif other.dest not in small_doors:
small_doors.append(other)
if len(small_doors) == 0 and (not skip_bk and (len(big_doors) == 0 or not big_key_available)):
return None
return small_doors + big_doors
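# Heuristic used by find_best_counter: returns True when opening new_door does not increase
# the net number of small keys available for other doors, i.e. the key spent on it is wasted.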
def key_wasted(new_door, old_door, old_counter, new_counter, key_layout, world, player):
if new_door.bigKey: # big keys are not wastes - it uses up a location
return True
chest_keys = available_chest_small_keys(old_counter, world, player)
old_key_diff = len(old_counter.key_only_locations) - old_counter.used_keys
old_avail = chest_keys + old_key_diff
new_chest_keys = available_chest_small_keys(new_counter, world, player)
new_key_diff = len(new_counter.key_only_locations) - new_counter.used_keys
new_avail = new_chest_keys + new_key_diff
if new_key_diff < old_key_diff or new_avail < old_avail:
return True
if new_avail >= old_avail:
wasted_keys = 0
old_children = old_counter.child_doors.keys()
new_children = [x for x in new_counter.child_doors.keys() if x != old_door and x.dest != old_door and (not x.bigKey or x not in old_children)]
current_counter = new_counter
opened_doors = dict(current_counter.open_doors)
bk_opened = current_counter.big_key_opened
for new_child in new_children:
proposed_doors = {**opened_doors, **dict.fromkeys([new_child, new_child.dest])}
bk_open = bk_opened or new_door.bigKey
new_counter = find_counter(proposed_doors, bk_open, key_layout)
if key_wasted(new_child, old_door, current_counter, new_counter, key_layout, world, player):
wasted_keys += 1
if new_avail - wasted_keys < old_avail:
return True # waste is possible
return False
def find_next_counter(new_door, old_counter, key_layout):
proposed_doors = {**old_counter.open_doors, **dict.fromkeys([new_door, new_door.dest])}
bk_open = old_counter.big_key_opened or new_door.bigKey
return find_counter(proposed_doors, bk_open, key_layout)
def check_special_locations(locations):
for loc in locations:
if loc.name == 'Hyrule Castle - Zelda\'s Chest':
return True
return False
def calc_avail_keys(key_counter, world, player):
chest_keys = available_chest_small_keys(key_counter, world, player)
raw_avail = chest_keys + len(key_counter.key_only_locations)
return raw_avail - key_counter.used_keys
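# Builds the DoorRules for a door from its wasteful counter: required keys are the keys
# already used plus one, discounted by chest keys that are demonstrably unneeded.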
def create_rule(key_counter, prev_counter, key_layout, world, player):
# prev_chest_keys = available_chest_small_keys(prev_counter, world)
# prev_avail = prev_chest_keys + len(prev_counter.key_only_locations)
chest_keys = available_chest_small_keys(key_counter, world, player)
key_gain = len(key_counter.key_only_locations) - len(prev_counter.key_only_locations)
# previous method
# raw_avail = chest_keys + len(key_counter.key_only_locations)
# available = raw_avail - key_counter.used_keys
# possible_smalls = count_unique_small_doors(key_counter, key_layout.flat_prop)
# required_keys = min(available, possible_smalls) + key_counter.used_keys
required_keys = key_counter.used_keys + 1 # this makes more sense, if key_counter has wasted all keys
adj_chest_keys = min(chest_keys, required_keys)
needed_chests = required_keys - len(key_counter.key_only_locations)
is_valid = needed_chests <= chest_keys
unneeded_chests = min(key_gain, max(0, adj_chest_keys - needed_chests))
rule_num = required_keys - unneeded_chests
return DoorRules(rule_num, is_valid)
def check_for_self_lock_key(rule, door, parent_counter, key_layout, world, player):
if world.accessibility[player] != 'locations':
counter = find_inverted_counter(door, parent_counter, key_layout, world, player)
if not self_lock_possible(counter):
return
if len(counter.free_locations) == 1 and len(counter.key_only_locations) == 0 and not counter.important_location:
rule.allow_small = True
rule.small_location = next(iter(counter.free_locations))
def find_inverted_counter(door, parent_counter, key_layout, world, player):
# open all doors in counter
counter = open_all_counter(parent_counter, key_layout, door=door)
max_counter = find_max_counter(key_layout)
# find the difference
inverted_counter = KeyCounter(key_layout.max_chests)
inverted_counter.free_locations = dict_difference(max_counter.free_locations, counter.free_locations)
inverted_counter.key_only_locations = dict_difference(max_counter.key_only_locations, counter.key_only_locations)
# child doors? used_keys?
# inverted_counter.child_doors = dict_difference(max_counter.child_doors, counter.child_doors)
inverted_counter.open_doors = dict_difference(max_counter.open_doors, counter.open_doors)
inverted_counter.other_locations = dict_difference(max_counter.other_locations, counter.other_locations)
for loc in inverted_counter.other_locations:
if important_location(loc, world, player):
inverted_counter.important_location = True
return inverted_counter
def open_all_counter(parent_counter, key_layout, door=None, skipBk=False):
changed = True
counter = parent_counter
proposed_doors = dict.fromkeys(parent_counter.open_doors.keys())
while changed:
changed = False
doors_to_open = {}
for child in counter.child_doors:
if door is None or (child != door and child != door.dest):
if skipBk:
if not child.bigKey:
doors_to_open[child] = None
elif not child.bigKey or not key_layout.big_key_special or counter.big_key_opened:
doors_to_open[child] = None
if len(doors_to_open.keys()) > 0:
proposed_doors = {**proposed_doors, **doors_to_open}
bk_hint = counter.big_key_opened
for d in doors_to_open.keys():
bk_hint = bk_hint or d.bigKey
counter = find_counter(proposed_doors, bk_hint, key_layout)
changed = True
return counter
def open_some_counter(parent_counter, key_layout, ignored_doors):
changed = True
counter = parent_counter
proposed_doors = dict.fromkeys(parent_counter.open_doors.keys())
while changed:
changed = False
doors_to_open = {}
for child in counter.child_doors:
if child not in ignored_doors:
if not child.bigKey:
doors_to_open[child] = None
if len(doors_to_open.keys()) > 0:
proposed_doors = {**proposed_doors, **doors_to_open}
bk_hint = counter.big_key_opened
for d in doors_to_open.keys():
bk_hint = bk_hint or d.bigKey
counter = find_counter(proposed_doors, bk_hint, key_layout)
changed = True
return counter
def self_lock_possible(counter):
return len(counter.free_locations) <= 1 and len(counter.key_only_locations) == 0 and not counter.important_location
def available_chest_small_keys(key_counter, world, player):
if not world.keyshuffle[player] and not world.retro[player]:
cnt = 0
for loc in key_counter.free_locations:
if key_counter.big_key_opened or '- Big Chest' not in loc.name:
cnt += 1
return min(cnt, key_counter.max_chests)
else:
return key_counter.max_chests
def available_chest_small_keys_logic(key_counter, world, player, sm_restricted):
if not world.keyshuffle[player] and not world.retro[player]:
cnt = 0
for loc in key_counter.free_locations:
if loc not in sm_restricted and (key_counter.big_key_opened or '- Big Chest' not in loc.name):
cnt += 1
return min(cnt, key_counter.max_chests)
else:
return key_counter.max_chests
def big_key_drop_available(key_counter):
for loc in key_counter.other_locations:
if loc.forced_big_key():
return True
return False
def bk_restricted_rules(rule, door, odd_counter, empty_flag, key_counter, key_layout, world, player):
if key_counter.big_key_opened:
return
best_counter = find_best_counter(door, odd_counter, key_counter, key_layout, world, player, True, empty_flag)
bk_rule = create_rule(best_counter, key_counter, key_layout, world, player)
if bk_rule.small_key_num >= rule.small_key_num:
return
door_open = find_next_counter(door, best_counter, key_layout)
ignored_doors = dict_intersection(best_counter.child_doors, door_open.child_doors)
dest_ignored = []
    for ignored_door in ignored_doors.keys():  # renamed loop variable to avoid shadowing the 'door' parameter
        if ignored_door.dest not in ignored_doors:
            dest_ignored.append(ignored_door.dest)
ignored_doors = {**ignored_doors, **dict.fromkeys(dest_ignored)}
post_counter = open_some_counter(door_open, key_layout, ignored_doors.keys())
unique_loc = dict_difference(post_counter.free_locations, best_counter.free_locations)
# todo: figure out the intention behind this change - better way to detect the big key is blocking needed key onlys?
if len(unique_loc) > 0: # and bk_rule.is_valid
rule.alternate_small_key = bk_rule.small_key_num
rule.alternate_big_key_loc.update(unique_loc)
# elif not bk_rule.is_valid:
# key_layout.key_logic.bk_restricted.update(unique_loc)
def find_worst_counter_wo_bk(small_key_num, accessible_set, door, odd_ctr, key_counter, key_layout):
if key_counter.big_key_opened:
return None, None, None
worst_counter = find_worst_counter(door, odd_ctr, key_counter, key_layout, True)
bk_rule_num = worst_counter.used_keys + 1
bk_access_set = set()
bk_access_set.update(worst_counter.free_locations)
bk_access_set.update(worst_counter.key_only_locations)
if bk_rule_num == small_key_num and len(bk_access_set ^ accessible_set) == 0:
return None, None, None
door_open = find_next_counter(door, worst_counter, key_layout)
ignored_doors = dict_intersection(worst_counter.child_doors, door_open.child_doors)
dest_ignored = []
    for ignored_door in ignored_doors.keys():  # renamed loop variable to avoid shadowing the 'door' parameter
        if ignored_door.dest not in ignored_doors:
            dest_ignored.append(ignored_door.dest)
ignored_doors = {**ignored_doors, **dict.fromkeys(dest_ignored)}
post_counter = open_some_counter(door_open, key_layout, ignored_doors.keys())
return worst_counter, post_counter, bk_rule_num
def open_a_door(door, child_state, flat_proposal):
if door.bigKey or door.name in special_big_key_doors:
child_state.big_key_opened = True
child_state.avail_doors.extend(child_state.big_doors)
child_state.opened_doors.extend(set([d.door for d in child_state.big_doors]))
child_state.big_doors.clear()
else:
child_state.opened_doors.append(door)
doors_to_open = [x for x in child_state.small_doors if x.door == door]
child_state.small_doors[:] = [x for x in child_state.small_doors if x.door != door]
child_state.avail_doors.extend(doors_to_open)
dest_door = door.dest
if dest_door in flat_proposal and door.type != DoorType.SpiralStairs:
child_state.opened_doors.append(dest_door)
if child_state.in_door_list_ic(dest_door, child_state.small_doors):
now_available = [x for x in child_state.small_doors if x.door == dest_door]
child_state.small_doors[:] = [x for x in child_state.small_doors if x.door != dest_door]
child_state.avail_doors.extend(now_available)
# allows dest doors
def unique_doors(doors):
unique_d_set = []
for d in doors:
if d.door not in unique_d_set:
unique_d_set.append(d.door)
return unique_d_set
# does not allow dest doors
def count_unique_sm_doors(doors):
unique_d_set = set()
for d in doors:
if d not in unique_d_set and (d.dest not in unique_d_set or d.type == DoorType.SpiralStairs) and not d.bigKey:
unique_d_set.add(d)
return len(unique_d_set)
def big_key_progress(key_counter):
return not only_sm_doors(key_counter) or exist_big_chest(key_counter)
def only_sm_doors(key_counter):
for door in key_counter.child_doors:
if door.bigKey:
return False
return True
# doesn't count dest doors
def count_unique_small_doors(key_counter, proposal):
cnt = 0
counted = set()
for door in key_counter.child_doors:
if door in proposal and door not in counted:
cnt += 1
counted.add(door)
if door.type != DoorType.SpiralStairs:
counted.add(door.dest)
return cnt
def exist_relevant_big_doors(key_counter, key_layout):
bk_counter = find_counter(key_counter.open_doors, True, key_layout, False)
if bk_counter is not None:
diff = dict_difference(bk_counter.free_locations, key_counter.free_locations)
if len(diff) > 0:
return True
diff = dict_difference(bk_counter.key_only_locations, key_counter.key_only_locations)
if len(diff) > 0:
return True
diff = dict_difference(bk_counter.child_doors, key_counter.child_doors)
if len(diff) > 0:
return True
return False
def exist_big_chest(key_counter):
for loc in key_counter.free_locations:
if '- Big Chest' in loc.name:
return True
return False
def count_locations_big_optional(locations, bk=False):
cnt = 0
for loc in locations:
if bk or '- Big Chest' not in loc.name:
cnt += 1
return cnt
def filter_big_chest(locations):
return [x for x in locations if '- Big Chest' not in x.name]
def count_locations_exclude_logic(locations, key_logic):
cnt = 0
for loc in locations:
if not location_is_bk_locked(loc, key_logic) and not loc.forced_item and not prize_or_event(loc):
cnt += 1
return cnt
def location_is_bk_locked(loc, key_logic):
return loc in key_logic.bk_chests or loc in key_logic.bk_locked
def prize_or_event(loc):
return loc.name in dungeon_events or '- Prize' in loc.name or loc.name in ['Agahnim 1', 'Agahnim 2']
def count_free_locations(state):
cnt = 0
for loc in state.found_locations:
if not prize_or_event(loc) and not loc.forced_item:
cnt += 1
return cnt
def count_locations_exclude_big_chest(state):
cnt = 0
for loc in state.found_locations:
if '- Big Chest' not in loc.name and not loc.forced_item and not prize_or_event(loc):
cnt += 1
return cnt
def count_small_key_only_locations(state):
cnt = 0
for loc in state.found_locations:
if loc.forced_item and loc.item.smallkey:
cnt += 1
return cnt
def big_chest_in_locations(locations):
return len(find_big_chest_locations(locations)) > 0
def find_big_chest_locations(locations):
ret = []
for loc in locations:
if 'Big Chest' in loc.name:
ret.append(loc)
return ret
def find_big_key_locked_locations(locations):
ret = []
for loc in locations:
if loc.name in ["Thieves' Town - Blind's Cell", "Hyrule Castle - Zelda's Chest"]:
ret.append(loc)
return ret
def expand_key_state(state, flat_proposal, world, player):
while len(state.avail_doors) > 0:
exp_door = state.next_avail_door()
door = exp_door.door
connect_region = world.get_entrance(door.name, player).connected_region
if state.validate(door, connect_region, world, player):
state.visit_region(connect_region, key_checks=True)
state.add_all_doors_check_keys(connect_region, flat_proposal, world, player)
def flatten_pair_list(paired_list):
flat_list = []
for d in paired_list:
if type(d) is tuple:
flat_list.append(d[0])
flat_list.append(d[1])
else:
flat_list.append(d)
return flat_list
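# Post-processing: maps every key-only location to the cheapest door rules that reach it,
# adjusts those rules so keys found in key-only locations are counted, then runs the deep
# soft-lock simulation in check_rules_deep.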
def check_rules(original_counter, key_layout, world, player):
all_key_only = set()
key_only_map = {}
queue = deque([(None, original_counter, original_counter.key_only_locations)])
completed = set()
completed.add(cid(original_counter, key_layout))
while len(queue) > 0:
queue = deque(sorted(queue, key=queue_sorter_2))
access_door, counter, key_only_loc = queue.popleft()
for loc in key_only_loc:
if loc not in all_key_only:
all_key_only.add(loc)
access_rules = []
key_only_map[loc] = access_rules
else:
access_rules = key_only_map[loc]
if access_door is None or access_door.name not in key_layout.key_logic.door_rules.keys():
if access_door is None or not access_door.bigKey:
access_rules.append(DoorRules(0, True))
else:
rule = key_layout.key_logic.door_rules[access_door.name]
if rule not in access_rules:
access_rules.append(rule)
for child in counter.child_doors.keys():
if not child.bigKey or not key_layout.big_key_special or counter.big_key_opened:
next_counter = find_next_counter(child, counter, key_layout)
c_id = cid(next_counter, key_layout)
if c_id not in completed:
completed.add(c_id)
new_key_only = dict_difference(next_counter.key_only_locations, counter.key_only_locations)
queue.append((child, next_counter, new_key_only))
min_rule_bk = defaultdict(list)
min_rule_non_bk = defaultdict(list)
check_non_bk = False
for loc, rule_list in key_only_map.items():
m_bk = None
m_nbk = None
for rule in rule_list:
if m_bk is None or rule.small_key_num <= m_bk:
min_rule_bk[loc].append(rule)
m_bk = rule.small_key_num
if rule.alternate_small_key is None:
ask = rule.small_key_num
else:
check_non_bk = True
ask = rule.alternate_small_key
if m_nbk is None or ask <= m_nbk:
min_rule_non_bk[loc].append(rule)
m_nbk = rule.alternate_small_key
adjust_key_location_mins(key_layout, min_rule_bk, lambda r: r.small_key_num, lambda r, v: setattr(r, 'small_key_num', v))
if check_non_bk:
adjust_key_location_mins(key_layout, min_rule_non_bk, lambda r: r.small_key_num if r.alternate_small_key is None else r.alternate_small_key,
lambda r, v: r if r.alternate_small_key is None else setattr(r, 'alternate_small_key', v))
check_rules_deep(original_counter, key_layout, world, player)
def adjust_key_location_mins(key_layout, min_rules, getter, setter):
collected_keys = key_layout.max_chests
collected_locs = set()
changed = True
while changed:
changed = False
for_removal = []
for loc, rules in min_rules.items():
if loc in collected_locs:
for_removal.append(loc)
for rule in rules:
if getter(rule) <= collected_keys and loc not in collected_locs:
changed = True
collected_keys += 1
collected_locs.add(loc)
for_removal.append(loc)
for loc in for_removal:
del min_rules[loc]
if len(min_rules) > 0:
for loc, rules in min_rules.items():
for rule in rules:
setter(rule, collected_keys)
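# Simulates key collection state-by-state to verify every child door can eventually open
# under the generated rules; if no door can open, the smallest offending rules are reduced
# and the state is retried.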
def check_rules_deep(original_counter, key_layout, world, player):
key_logic = key_layout.key_logic
big_locations = {x for x in key_layout.all_chest_locations if x not in key_logic.bk_restricted}
queue = deque([original_counter])
completed = set()
completed.add(cid(original_counter, key_layout))
last_counter = None
bail = 0
while len(queue) > 0:
counter = queue.popleft()
if counter == last_counter:
bail += 1
if bail > 10:
raise Exception('Key logic issue, during deep rule check: %s' % key_layout.sector.name)
else:
bail = 0
last_counter = counter
chest_keys = available_chest_small_keys_logic(counter, world, player, key_logic.sm_restricted)
bk_drop = big_key_drop_available(counter)
big_avail = counter.big_key_opened or bk_drop
big_maybe_not_found = not counter.big_key_opened and not bk_drop # better named as big_missing?
if not key_layout.big_key_special and not big_avail:
if world.bigkeyshuffle[player]:
big_avail = True
else:
for location in counter.free_locations:
if location not in key_logic.bk_restricted:
big_avail = True
break
outstanding_big_locs = {x for x in big_locations if x not in counter.free_locations}
if big_maybe_not_found:
if len(outstanding_big_locs) == 0 and not key_layout.big_key_special:
big_maybe_not_found = False
big_uses_chest = big_avail and not key_layout.big_key_special
collected_alt = len(counter.key_only_locations) + chest_keys
if big_uses_chest and chest_keys == count_locations_big_optional(counter.free_locations, counter.big_key_opened):
chest_keys -= 1
collected = len(counter.key_only_locations) + chest_keys
can_progress = len(counter.child_doors) == 0
smalls_opened, big_opened = False, False
small_rules = []
for door in counter.child_doors.keys():
can_open = False
if door.bigKey and big_avail:
can_open = True
elif door.name in key_logic.door_rules.keys():
rule = key_logic.door_rules[door.name]
small_rules.append(rule)
if rule_satisfied(rule, collected, collected_alt, outstanding_big_locs, chest_keys, key_layout):
can_open = True
smalls_opened = True
elif not door.bigKey:
can_open = True
if can_open:
can_progress = smalls_opened or not big_maybe_not_found
next_counter = find_next_counter(door, counter, key_layout)
c_id = cid(next_counter, key_layout)
if c_id not in completed:
completed.add(c_id)
queue.append(next_counter)
if not can_progress:
if len(small_rules) > 0: # zero could be indicative of a problem, but also, the big key is now required
reduce_rules(small_rules, collected, collected_alt)
queue.append(counter) # run it through again
else:
raise Exception('Possible problem with generation or bk rules')
def rule_satisfied(rule, collected, collected_alt, outstanding_big_locs, chest_keys, key_layout):
if collected >= rule.small_key_num:
return True
if rule.allow_small and collected >= rule.small_key_num-1 and chest_keys < key_layout.max_chests:
return True
rule_diff = outstanding_big_locs.difference(rule.alternate_big_key_loc)
if rule.alternate_small_key is not None and len(rule_diff) == 0 and collected >= rule.alternate_small_key:
return True
if collected_alt > collected:
if collected_alt >= rule.small_key_num:
return True
if rule.allow_small and collected_alt >= rule.small_key_num-1 and chest_keys+1 < key_layout.max_chests:
return True
if rule.alternate_small_key is not None and len(rule_diff) == 0 and collected_alt >= rule.alternate_small_key:
return True
return False
def reduce_rules(small_rules, collected, collected_alt):
smallest_rules = []
min_num = None
for rule in small_rules:
if min_num is None or rule.small_key_num <= min_num:
if min_num is not None and rule.small_key_num < min_num:
min_num = rule.small_key_num
smallest_rules.clear()
elif min_num is None:
min_num = rule.small_key_num
smallest_rules.append(rule)
for rule in smallest_rules:
if rule.allow_small: # we are already reducing it
rule.allow_small = False
if min_num > collected_alt > collected:
rule.small_key_num = collected_alt
else:
rule.small_key_num = collected
def set_paired_rules(key_logic, world, player):
for d_name, rule in key_logic.door_rules.items():
door = world.get_door(d_name, player)
if door.dest.name in key_logic.door_rules.keys():
rule.opposite = key_logic.door_rules[door.dest.name]
def check_bk_special(regions, world, player):
for r_name in regions:
region = world.get_region(r_name, player)
for loc in region.locations:
if loc.forced_big_key():
return True
return False
# Soft lock stuff
def validate_key_layout(key_layout, world, player):
# retro is all good - except for hyrule castle in standard mode
if (world.retro[player] and (world.mode[player] != 'standard' or key_layout.sector.name != 'Hyrule Castle')) or world.logic[player] == 'nologic':
return True
flat_proposal = key_layout.flat_prop
state = ExplorationState(dungeon=key_layout.sector.name)
state.key_locations = key_layout.max_chests
state.big_key_special = check_bk_special(key_layout.sector.regions, world, player)
for region in key_layout.start_regions:
state.visit_region(region, key_checks=True)
state.add_all_doors_check_keys(region, flat_proposal, world, player)
return validate_key_layout_sub_loop(key_layout, state, {}, flat_proposal, None, 0, world, player)
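# Recursive worker for validate_key_layout: tries every order of spending small keys and the
# big key, memoizing explored states, and returns False if some ordering runs out of
# locations to hold the required keys.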
def validate_key_layout_sub_loop(key_layout, state, checked_states, flat_proposal, prev_state, prev_avail, world, player):
expand_key_state(state, flat_proposal, world, player)
smalls_avail = len(state.small_doors) > 0 # de-dup crystal repeats
num_bigs = 1 if len(state.big_doors) > 0 else 0 # all or nothing
if not smalls_avail and num_bigs == 0:
return True # I think that's the end
# todo: fix state to separate out these types
ttl_locations = count_free_locations(state) if state.big_key_opened else count_locations_exclude_big_chest(state)
ttl_small_key_only = count_small_key_only_locations(state)
available_small_locations = cnt_avail_small_locations(ttl_locations, ttl_small_key_only, state, world, player)
available_big_locations = cnt_avail_big_locations(ttl_locations, state, world, player)
if invalid_self_locking_key(key_layout, state, prev_state, prev_avail, world, player):
return False
# todo: allow more key shuffles - refine placement rules
# if (not smalls_avail or available_small_locations == 0) and (state.big_key_opened or num_bigs == 0 or available_big_locations == 0):
found_forced_bk = state.found_forced_bk()
smalls_done = not smalls_avail or not enough_small_locations(state, available_small_locations)
bk_done = state.big_key_opened or num_bigs == 0 or (available_big_locations == 0 and not found_forced_bk)
if smalls_done and bk_done:
return False
else:
if smalls_avail and available_small_locations > 0:
for exp_door in state.small_doors:
state_copy = state.copy()
open_a_door(exp_door.door, state_copy, flat_proposal)
state_copy.used_smalls += 1
if state_copy.used_smalls > ttl_small_key_only:
state_copy.used_locations += 1
code = state_id(state_copy, flat_proposal)
if code not in checked_states.keys():
valid = validate_key_layout_sub_loop(key_layout, state_copy, checked_states, flat_proposal,
state, available_small_locations, world, player)
checked_states[code] = valid
else:
valid = checked_states[code]
if not valid:
return False
if not state.big_key_opened and (available_big_locations >= num_bigs > 0 or (found_forced_bk and num_bigs > 0)):
state_copy = state.copy()
open_a_door(state.big_doors[0].door, state_copy, flat_proposal)
if not found_forced_bk:
state_copy.used_locations += 1
code = state_id(state_copy, flat_proposal)
if code not in checked_states.keys():
valid = validate_key_layout_sub_loop(key_layout, state_copy, checked_states, flat_proposal,
state, available_small_locations, world, player)
checked_states[code] = valid
else:
valid = checked_states[code]
if not valid:
return False
return True
def invalid_self_locking_key(key_layout, state, prev_state, prev_avail, world, player):
if prev_state is None or state.used_smalls == prev_state.used_smalls:
return False
new_bk_doors = set(state.big_doors).difference(set(prev_state.big_doors))
state_copy = state.copy()
while len(new_bk_doors) > 0:
for door in new_bk_doors:
open_a_door(door.door, state_copy, key_layout.flat_prop)
new_bk_doors = set(state_copy.big_doors).difference(set(prev_state.big_doors))
expand_key_state(state_copy, key_layout.flat_prop, world, player)
new_locations = set(state_copy.found_locations).difference(set(prev_state.found_locations))
important_found = False
for loc in new_locations:
important_found |= important_location(loc, world, player)
if not important_found:
return False
new_small_doors = set(state.small_doors).difference(set(prev_state.small_doors))
if len(new_small_doors) > 0:
return False
return prev_avail - 1 == 0
def enough_small_locations(state, avail_small_loc):
unique_d_set = set()
for exp_door in state.small_doors:
door = exp_door.door
if door not in unique_d_set and door.dest not in unique_d_set:
unique_d_set.add(door)
return avail_small_loc >= len(unique_d_set)
def cnt_avail_small_locations(free_locations, key_only, state, world, player):
if not world.keyshuffle[player] and not world.retro[player]:
bk_adj = 1 if state.big_key_opened and not state.big_key_special else 0
avail_chest_keys = min(free_locations - bk_adj, state.key_locations - key_only)
return max(0, avail_chest_keys + key_only - state.used_smalls)
return state.key_locations - state.used_smalls
def cnt_avail_big_locations(ttl_locations, state, world, player):
if not world.bigkeyshuffle[player]:
return max(0, ttl_locations - state.used_locations) if not state.big_key_special else 0
return 1 if not state.big_key_special else 0
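# Enumerates every reachable key-counter state for the proposal by opening doors
# breadth-first from the start regions; states are memoized by their door bitstring id.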
def create_key_counters(key_layout, world, player):
key_counters = {}
flat_proposal = key_layout.flat_prop
state = ExplorationState(dungeon=key_layout.sector.name)
if world.doorShuffle[player] == 'vanilla':
state.key_locations = len(world.get_dungeon(key_layout.sector.name, player).small_keys)
else:
state.key_locations = world.dungeon_layouts[player][key_layout.sector.name].key_doors_num
state.big_key_special, special_region = False, None
for region in key_layout.sector.regions:
for location in region.locations:
if location.forced_big_key():
state.big_key_special = True
special_region = region
for region in key_layout.start_regions:
state.visit_region(region, key_checks=True)
state.add_all_doors_check_keys(region, flat_proposal, world, player)
expand_key_state(state, flat_proposal, world, player)
code = state_id(state, key_layout.flat_prop)
key_counters[code] = create_key_counter(state, key_layout, world, player)
queue = deque([(key_counters[code], state)])
while len(queue) > 0:
next_key_counter, parent_state = queue.popleft()
for door in next_key_counter.child_doors:
child_state = parent_state.copy()
if door.bigKey or door.name in special_big_key_doors:
key_layout.key_logic.bk_doors.add(door)
# open the door, if possible
if not door.bigKey or not child_state.big_key_special or child_state.visited_at_all(special_region):
open_a_door(door, child_state, flat_proposal)
expand_key_state(child_state, flat_proposal, world, player)
code = state_id(child_state, key_layout.flat_prop)
if code not in key_counters.keys():
child_kr = create_key_counter(child_state, key_layout, world, player)
key_counters[code] = child_kr
queue.append((child_kr, child_state))
return key_counters
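# Snapshots one exploration state: classifies found locations as free chests, key-only
# drops, important, or other, and records opened doors, available child doors, and keys used.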
def create_key_counter(state, key_layout, world, player):
key_counter = KeyCounter(key_layout.max_chests)
key_counter.child_doors.update(dict.fromkeys(unique_doors(state.small_doors+state.big_doors)))
for loc in state.found_locations:
if important_location(loc, world, player):
key_counter.important_location = True
key_counter.other_locations[loc] = None
key_counter.important_locations[loc] = None
elif loc.forced_item and loc.item.name == key_layout.key_logic.small_key_name:
key_counter.key_only_locations[loc] = None
elif loc.forced_item and loc.item.name == key_layout.key_logic.bk_name:
key_counter.other_locations[loc] = None
elif loc.name not in dungeon_events:
key_counter.free_locations[loc] = None
else:
key_counter.other_locations[loc] = None
key_counter.open_doors.update(dict.fromkeys(state.opened_doors))
key_counter.used_keys = count_unique_sm_doors(state.opened_doors)
key_counter.big_key_opened = state.big_key_opened
return key_counter
imp_locations = None
def imp_locations_factory(world, player):
global imp_locations
if imp_locations:
return imp_locations
imp_locations = ['Agahnim 1', 'Agahnim 2', 'Attic Cracked Floor', 'Suspicious Maiden']
if world.mode[player] == 'standard':
imp_locations.append('Zelda Pickup')
imp_locations.append('Zelda Dropoff')
return imp_locations
def important_location(loc, world, player):
return '- Prize' in loc.name or loc.name in imp_locations_factory(world, player) or (loc.forced_big_key())
def create_odd_key_counter(door, parent_counter, key_layout, world, player):
odd_counter = KeyCounter(key_layout.max_chests)
next_counter = find_next_counter(door, parent_counter, key_layout)
odd_counter.free_locations = dict_difference(next_counter.free_locations, parent_counter.free_locations)
odd_counter.key_only_locations = dict_difference(next_counter.key_only_locations, parent_counter.key_only_locations)
odd_counter.child_doors = {}
for d in next_counter.child_doors:
if d not in parent_counter.child_doors and (d.type == DoorType.SpiralStairs or d.dest not in parent_counter.child_doors):
odd_counter.child_doors[d] = None
odd_counter.other_locations = dict_difference(next_counter.other_locations, parent_counter.other_locations)
odd_counter.important_locations = dict_difference(next_counter.important_locations, parent_counter.important_locations)
for loc in odd_counter.other_locations:
if important_location(loc, world, player):
odd_counter.important_location = True
return odd_counter
def dict_difference(dict_a, dict_b):
return dict.fromkeys([x for x in dict_a.keys() if x not in dict_b.keys()])
def dict_intersection(dict_a, dict_b):
return dict.fromkeys([x for x in dict_a.keys() if x in dict_b.keys()])
def state_id(state, flat_proposal):
s_id = '1' if state.big_key_opened else '0'
for d in flat_proposal:
s_id += '1' if d in state.opened_doors else '0'
return s_id
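# Looks up the precomputed counter for a set of opened doors, retrying with the paired
# destination doors included if the exact permutation was not cached.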
def find_counter(opened_doors, bk_hint, key_layout, raise_on_error=True):
counter = find_counter_hint(opened_doors, bk_hint, key_layout)
if counter is not None:
return counter
more_doors = []
for door in opened_doors.keys():
more_doors.append(door)
if door.dest not in opened_doors.keys():
more_doors.append(door.dest)
if len(more_doors) > len(opened_doors.keys()):
counter = find_counter_hint(dict.fromkeys(more_doors), bk_hint, key_layout)
if counter is not None:
return counter
if raise_on_error:
raise Exception('Unable to find door permutation. Init CID: %s' % counter_id(opened_doors, bk_hint, key_layout.flat_prop))
return None
def find_counter_hint(opened_doors, bk_hint, key_layout):
cid = counter_id(opened_doors, bk_hint, key_layout.flat_prop)
if cid in key_layout.key_counters.keys():
return key_layout.key_counters[cid]
if not bk_hint:
cid = counter_id(opened_doors, True, key_layout.flat_prop)
if cid in key_layout.key_counters.keys():
return key_layout.key_counters[cid]
return None
def find_max_counter(key_layout):
max_counter = find_counter_hint(dict.fromkeys(key_layout.flat_prop), False, key_layout)
if max_counter is None:
raise Exception("Max Counter is none - something is amiss")
if len(max_counter.child_doors) > 0:
max_counter = find_counter_hint(dict.fromkeys(key_layout.flat_prop), True, key_layout)
return max_counter
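# Encodes a state as a bitstring: the big-key flag followed by one bit per proposed door.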
def counter_id(opened_doors, bk_unlocked, flat_proposal):
s_id = '1' if bk_unlocked else '0'
for d in flat_proposal:
s_id += '1' if d in opened_doors.keys() else '0'
return s_id
def cid(counter, key_layout):
return counter_id(counter.open_doors, counter.big_key_opened, key_layout.flat_prop)
# class SoftLockException(Exception):
# pass
# vanilla validation code
def validate_vanilla_key_logic(world, player):
validators = {
'Hyrule Castle': val_hyrule,
'Eastern Palace': val_eastern,
'Desert Palace': val_desert,
'Tower of Hera': val_hera,
'Agahnims Tower': val_tower,
'Palace of Darkness': val_pod,
'Swamp Palace': val_swamp,
'Skull Woods': val_skull,
'Thieves Town': val_thieves,
'Ice Palace': val_ice,
'Misery Mire': val_mire,
'Turtle Rock': val_turtle,
'Ganons Tower': val_ganons
}
key_logic_dict = world.key_logic[player]
for key, key_logic in key_logic_dict.items():
validators[key](key_logic, world, player)
def val_hyrule(key_logic, world, player):
if world.mode[player] == 'standard':
val_rule(key_logic.door_rules['Hyrule Dungeon Map Room Key Door S'], 1)
val_rule(key_logic.door_rules['Hyrule Dungeon Armory Interior Key Door N'], 2)
val_rule(key_logic.door_rules['Sewers Dark Cross Key Door N'], 3)
val_rule(key_logic.door_rules['Sewers Key Rat Key Door N'], 4)
else:
val_rule(key_logic.door_rules['Sewers Secret Room Key Door S'], 2)
val_rule(key_logic.door_rules['Sewers Dark Cross Key Door N'], 2)
val_rule(key_logic.door_rules['Hyrule Dungeon Map Room Key Door S'], 2)
val_rule(key_logic.door_rules['Hyrule Dungeon Armory Interior Key Door N'], 4)
def val_eastern(key_logic, world, player):
val_rule(key_logic.door_rules['Eastern Dark Square Key Door WN'], 2, True, 'Eastern Palace - Big Key Chest', 1, {'Eastern Palace - Big Key Chest'})
val_rule(key_logic.door_rules['Eastern Darkness Up Stairs'], 2)
assert world.get_location('Eastern Palace - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Eastern Palace - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_desert(key_logic, world, player):
val_rule(key_logic.door_rules['Desert East Wing Key Door EN'], 4)
val_rule(key_logic.door_rules['Desert Tiles 1 Up Stairs'], 2)
val_rule(key_logic.door_rules['Desert Beamos Hall NE'], 3)
val_rule(key_logic.door_rules['Desert Tiles 2 NE'], 4)
assert world.get_location('Desert Palace - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Desert Palace - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_hera(key_logic, world, player):
val_rule(key_logic.door_rules['Hera Lobby Key Stairs'], 1, True, 'Tower of Hera - Big Key Chest')
assert world.get_location('Tower of Hera - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Tower of Hera - Compass Chest', player) in key_logic.bk_restricted
assert world.get_location('Tower of Hera - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 3
def val_tower(key_logic, world, player):
val_rule(key_logic.door_rules['Tower Room 03 Up Stairs'], 1)
val_rule(key_logic.door_rules['Tower Dark Maze ES'], 2)
val_rule(key_logic.door_rules['Tower Dark Archers Up Stairs'], 3)
val_rule(key_logic.door_rules['Tower Circle of Pots ES'], 4)
def val_pod(key_logic, world, player):
val_rule(key_logic.door_rules['PoD Arena Main NW'], 4)
val_rule(key_logic.door_rules['PoD Basement Ledge Up Stairs'], 6, True, 'Palace of Darkness - Big Key Chest')
val_rule(key_logic.door_rules['PoD Compass Room SE'], 6, True, 'Palace of Darkness - Harmless Hellway')
val_rule(key_logic.door_rules['PoD Falling Bridge WN'], 6)
val_rule(key_logic.door_rules['PoD Dark Pegs WN'], 6)
assert world.get_location('Palace of Darkness - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Palace of Darkness - Boss', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_swamp(key_logic, world, player):
val_rule(key_logic.door_rules['Swamp Entrance Down Stairs'], 1)
val_rule(key_logic.door_rules['Swamp Pot Row WS'], 2)
val_rule(key_logic.door_rules['Swamp Trench 1 Key Ledge NW'], 3)
val_rule(key_logic.door_rules['Swamp Hub North Ledge N'], 5)
val_rule(key_logic.door_rules['Swamp Hub WN'], 6)
val_rule(key_logic.door_rules['Swamp Waterway NW'], 6)
assert world.get_location('Swamp Palace - Entrance', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 1
def val_skull(key_logic, world, player):
val_rule(key_logic.door_rules['Skull 3 Lobby NW'], 4)
val_rule(key_logic.door_rules['Skull Spike Corner ES'], 5)
def val_thieves(key_logic, world, player):
val_rule(key_logic.door_rules['Thieves Hallway WS'], 1)
val_rule(key_logic.door_rules['Thieves Spike Switch Up Stairs'], 3)
val_rule(key_logic.door_rules['Thieves Conveyor Bridge WS'], 3, True, 'Thieves\' Town - Big Chest')
assert world.get_location('Thieves\' Town - Attic', player) in key_logic.bk_restricted
assert world.get_location('Thieves\' Town - Boss', player) in key_logic.bk_restricted
assert world.get_location('Thieves\' Town - Blind\'s Cell', player) in key_logic.bk_restricted
assert world.get_location('Thieves\' Town - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 4
def val_ice(key_logic, world, player):
val_rule(key_logic.door_rules['Ice Jelly Key Down Stairs'], 1)
val_rule(key_logic.door_rules['Ice Conveyor SW'], 2)
val_rule(key_logic.door_rules['Ice Backwards Room Down Stairs'], 5)
assert world.get_location('Ice Palace - Boss', player) in key_logic.bk_restricted
assert world.get_location('Ice Palace - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_mire(key_logic, world, player):
mire_west_wing = {'Misery Mire - Big Key Chest', 'Misery Mire - Compass Chest'}
val_rule(key_logic.door_rules['Mire Spikes NW'], 3) # todo: is sometimes 3 or 5? best_counter order matters
# val_rule(key_logic.door_rules['Mire Spike Barrier NE'], 4) # kind of a waste mostly
val_rule(key_logic.door_rules['Mire Hub WS'], 5, False, None, 3, mire_west_wing)
val_rule(key_logic.door_rules['Mire Conveyor Crystal WS'], 6, False, None, 4, mire_west_wing)
assert world.get_location('Misery Mire - Boss', player) in key_logic.bk_restricted
assert world.get_location('Misery Mire - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 2
def val_turtle(key_logic, world, player):
# todo: check vanilla key logic when TR back doors are accessible
if world.shuffle[player] == 'vanilla' and world.mode[player] != 'inverted':
val_rule(key_logic.door_rules['TR Hub NW'], 1)
val_rule(key_logic.door_rules['TR Pokey 1 NW'], 2)
val_rule(key_logic.door_rules['TR Chain Chomps Down Stairs'], 3)
val_rule(key_logic.door_rules['TR Pokey 2 ES'], 6, True, 'Turtle Rock - Big Key Chest', 4, {'Turtle Rock - Big Key Chest'})
val_rule(key_logic.door_rules['TR Crystaroller Down Stairs'], 5)
val_rule(key_logic.door_rules['TR Dash Bridge WS'], 6)
assert world.get_location('Turtle Rock - Eye Bridge - Bottom Right', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Eye Bridge - Top Left', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Eye Bridge - Top Right', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Eye Bridge - Bottom Left', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Boss', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Crystaroller Room', player) in key_logic.bk_restricted
assert world.get_location('Turtle Rock - Big Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 7
def val_ganons(key_logic, world, player):
rando_room = {'Ganons Tower - Randomizer Room - Top Left', 'Ganons Tower - Randomizer Room - Top Right', 'Ganons Tower - Randomizer Room - Bottom Left', 'Ganons Tower - Randomizer Room - Bottom Right'}
compass_room = {'Ganons Tower - Compass Room - Top Left', 'Ganons Tower - Compass Room - Top Right', 'Ganons Tower - Compass Room - Bottom Left', 'Ganons Tower - Compass Room - Bottom Right'}
gt_middle = {'Ganons Tower - Big Key Room - Left', 'Ganons Tower - Big Key Chest', 'Ganons Tower - Big Key Room - Right', 'Ganons Tower - Bob\'s Chest', 'Ganons Tower - Big Chest'}
val_rule(key_logic.door_rules['GT Double Switch EN'], 6, False, None, 4, rando_room.union({'Ganons Tower - Firesnake Room'}))
val_rule(key_logic.door_rules['GT Hookshot ES'], 7, False, 'Ganons Tower - Map Chest', 5, {'Ganons Tower - Map Chest'})
val_rule(key_logic.door_rules['GT Tile Room EN'], 6, False, None, 5, compass_room)
val_rule(key_logic.door_rules['GT Firesnake Room SW'], 7, False, None, 5, rando_room)
val_rule(key_logic.door_rules['GT Conveyor Star Pits EN'], 6, False, None, 5, gt_middle) # should be 7?
val_rule(key_logic.door_rules['GT Mini Helmasaur Room WN'], 6) # not sure about this 6...
val_rule(key_logic.door_rules['GT Crystal Circles SW'], 8)
assert world.get_location('Ganons Tower - Mini Helmasaur Room - Left', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Mini Helmasaur Room - Right', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Big Chest', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Pre-Moldorm Chest', player) in key_logic.bk_restricted
assert world.get_location('Ganons Tower - Validation Chest', player) in key_logic.bk_restricted
assert len(key_logic.bk_restricted) == 5
def val_rule(rule, skn, allow=False, loc=None, askn=None, setCheck=None):
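    """Assert that a door rule matches the expected values: skn = small_key_num,
    allow = allow_small, loc = small_location, askn = alternate_small_key,
    setCheck = expected names in alternate_big_key_loc."""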
if setCheck is None:
setCheck = set()
assert rule.small_key_num == skn
assert rule.allow_small == allow
assert rule.small_location == loc or rule.small_location.name == loc
assert rule.alternate_small_key == askn
assert len(setCheck) == len(rule.alternate_big_key_loc)
for loc in rule.alternate_big_key_loc:
assert loc.name in setCheck
# Soft lock stuff
def validate_key_placement(key_layout, world, player):
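    """Check each reachable key-counter state for key soft locks; returns False
    (and logs the unreachable locations) when some state cannot progress, True otherwise."""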
if world.retro[player] or world.accessibility[player] == 'none':
return True # Can't keylock in retro. Expected if beatable only.
max_counter = find_max_counter(key_layout)
keys_outside = 0
big_key_outside = False
smallkey_name = dungeon_keys[key_layout.sector.name]
bigkey_name = dungeon_bigs[key_layout.sector.name]
if world.keyshuffle[player]:
keys_outside = key_layout.max_chests - sum(1 for i in max_counter.free_locations if i.item is not None and i.item.name == smallkey_name and i.item.player == player)
if world.bigkeyshuffle[player]:
max_counter = find_max_counter(key_layout)
big_key_outside = bigkey_name not in (l.item.name for l in max_counter.free_locations if l.item)
for code, counter in key_layout.key_counters.items():
if len(counter.child_doors) == 0:
continue
if key_layout.big_key_special:
big_found = any(i.forced_item is not None and i.item.bigkey for i in counter.other_locations) or big_key_outside
else:
big_found = any(i.item is not None and i.item.name == bigkey_name for i in counter.free_locations if "- Big Chest" not in i.name) or big_key_outside
if counter.big_key_opened and not big_found:
continue # Can't get to this state
found_locations = set(i for i in counter.free_locations if big_found or "- Big Chest" not in i.name)
found_keys = sum(1 for i in found_locations if i.item is not None and i.item.name == smallkey_name and i.item.player == player) + \
len(counter.key_only_locations) + keys_outside
can_progress = (not counter.big_key_opened and big_found and any(d.bigKey for d in counter.child_doors)) or \
found_keys > counter.used_keys and any(not d.bigKey for d in counter.child_doors)
if not can_progress:
missing_locations = set(max_counter.free_locations.keys()).difference(found_locations)
missing_items = [l for l in missing_locations if l.item is None or (l.item.name != smallkey_name and l.item.name != bigkey_name) or "- Boss" in l.name]
# missing_key_only = set(max_counter.key_only_locations.keys()).difference(counter.key_only_locations.keys()) # do freestanding keys matter for locations?
if len(missing_items) > 0: # world.accessibility[player]=='locations' and (len(missing_locations)>0 or len(missing_key_only) > 0):
logging.getLogger('').error("Keylock - can't open locations: ")
logging.getLogger('').error("code: " + code)
for i in missing_locations:
logging.getLogger('').error(i)
return False
return True
|
the-stack_0_11732 | """
Test calling user defined functions using expression evaluation.
This test checks that typesystem lookup works correctly for typedefs of
untagged structures.
Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestExprLookupAnonStructTypedef(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=['linux'],
archs=['arm'],
bugnumber="llvm.org/pr27868")
def test(self):
"""Test typedeffed untagged struct arguments for function call expressions"""
self.build()
lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.cpp"))
self.expect_expr("multiply(&s)", result_type="double", result_value="1")
|
the-stack_0_11733 | from random import randint
from pygame import *
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_speed, player_x, player_y):
super().__init__()
self.image = transform.scale(image.load(player_image),(65, 65))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
background = transform.scale(image.load('desert.jpg'),(1100, 900))
class Player(GameSprite):
def update(self):
keys_pressed = key.get_pressed()
if keys_pressed[K_a] and self.rect.x > 0:
self.rect.x -= self.speed
if keys_pressed[K_d] and self.rect.x < 595:
self.rect.x += self.speed
if keys_pressed[K_w] and self.rect.y > 0:
self.rect.y -= self.speed
if keys_pressed[K_s] and self.rect.y < 395:
self.rect.y += self.speed
class Ball (GameSprite):
def hod(self):
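        # Move the ball diagonally by its speed each time this is called.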
self.rect.x += self.speed
self.rect.y += self.speed
window = display.set_mode((1100,900))
display.set_caption('Shooter')
background = transform.scale(image.load('desert.jpg'),(1100, 900))
run = True
FPS = 60
clock = time.Clock()
finish = False
score = 0
rocket = Player('raket.png', 30, 50, 450)
rocket1 = Player('raket.png', 30, 1050, 450)
shar = Ball('bol.png', 40, 550, 450)
while run:
    for e in event.get():
if e.type == QUIT:
run = False
rocket.reset()
rocket1.reset()
    display.update()
    time.delay(50) |
the-stack_0_11734 | from . import _nnls
from numpy import asarray_chkfinite, zeros, double
__all__ = ['nnls']
def nnls(A, b, maxiter=None):
"""
Solve ``argmin_x || Ax - b ||_2`` for ``x>=0``. This is a wrapper
for a FORTRAN non-negative least squares solver.
Parameters
----------
A : ndarray
Matrix ``A`` as shown above.
b : ndarray
Right-hand side vector.
maxiter: int, optional
Maximum number of iterations, optional.
Default is ``3 * A.shape[1]``.
Returns
-------
x : ndarray
Solution vector.
rnorm : float
The residual, ``|| Ax-b ||_2``.
See Also
--------
lsq_linear : Linear least squares with bounds on the variables
Notes
-----
The FORTRAN code was published in the book below. The algorithm
is an active set method. It solves the KKT (Karush-Kuhn-Tucker)
conditions for the non-negative least squares problem.
References
----------
Lawson C., Hanson R.J., (1987) Solving Least Squares Problems, SIAM
Examples
--------
>>> from scipy.optimize import nnls
...
>>> A = np.array([[1, 0], [1, 0], [0, 1]])
>>> b = np.array([2, 1, 1])
>>> nnls(A, b)
(array([1.5, 1. ]), 0.7071067811865475)
>>> b = np.array([-1, -1, -1])
>>> nnls(A, b)
(array([0., 0.]), 1.7320508075688772)
"""
A, b = map(asarray_chkfinite, (A, b))
if len(A.shape) != 2:
raise ValueError("Expected a two-dimensional array (matrix)" +
", but the shape of A is %s" % (A.shape, ))
if len(b.shape) != 1:
raise ValueError("Expected a one-dimensional array (vector" +
", but the shape of b is %s" % (b.shape, ))
m, n = A.shape
if m != b.shape[0]:
raise ValueError(
"Incompatible dimensions. The first dimension of " +
"A is %s, while the shape of b is %s" % (m, (b.shape[0], )))
maxiter = -1 if maxiter is None else int(maxiter)
w = zeros((n,), dtype=double)
zz = zeros((m,), dtype=double)
index = zeros((n,), dtype=int)
x, rnorm, mode = _nnls.nnls(A, m, n, b, w, zz, index, maxiter)
if mode != 1:
raise RuntimeError("too many iterations")
return x, rnorm
|
the-stack_0_11735 | #!/usr/bin/env python3
# coding: utf8
"""
Loads and handles training and validation data collections.
"""
__author__ = 'David Flury, Andreas Kaufmann, Raphael Müller'
__email__ = "[email protected]"
import hashlib
import glob
import os
import random
from unmix.source.configuration import Configuration
from unmix.source.data.song import Song
from unmix.source.logging.logger import Logger
class DataLoader(object):
@staticmethod
def load(path=None, test_data_count=None):
if path is None:
folders = Configuration.get('collection.folders')
if folders is None:
return DataLoader.loadDataset(Configuration.get('collection.folder', optional=False), test_data_count)
else:
return DataLoader.loadMultipleDatasets(folders, test_data_count)
else:
return DataLoader.loadDataset(path, test_data_count)
@staticmethod
def loadDataset(path, test_data_count):
files = DataLoader.loadFiles(path)
training_files, validation_files, test_files = DataLoader.splitDataset(files, test_data_count)
Logger.debug(
"Found %d songs for training and %d songs for validation." % (len(training_files), len(validation_files)))
if test_files is not None:
test_frequency = Configuration.get('collection.test_frequency', default=0)
Logger.debug("Use %d songs for tests after every %d epoch." % (len(test_files), test_frequency))
if len(training_files) == 0:
Logger.warn("No training files assigned.")
if len(validation_files) == 0:
Logger.warn("No validation files assigned.")
return training_files, validation_files, test_files
@staticmethod
def loadFiles(path, ignore_song_limit=False):
if path is None:
path = Configuration.get_path('collection.folder', False)
instrument_filter = os.path.join(path, '**', '%s*.wav' % Song.PREFIX_INSTRUMENT)
files_instrument = [os.path.dirname(file) for file in glob.iglob(instrument_filter, recursive=True)]
rest_filter = os.path.join(path, '**', '%s*.wav' % Song.PREFIX_REST)
files_rest = [os.path.dirname(file) for file in glob.iglob(rest_filter, recursive=True)]
files = [f for f in files_instrument if f in files_rest] # make sure both instrument and rest file exists
skipped_count = len(set(files_instrument) - set(files_rest)) + len(set(files_rest) - set(files_instrument))
Logger.debug(f"Skipped {skipped_count} files (incomplete instrument/rest pair)")
# Sort files by hash value of folder to guarantee a consistent order
files.sort(key=lambda x: hashlib.md5(os.path.basename(x).encode('utf-8', 'surrogatepass')).hexdigest())
song_limit = Configuration.get('collection.song_limit', default=0)
if not ignore_song_limit and song_limit > 0:
if song_limit <= 1: # Configuration as percentage share
song_limit = song_limit * len(files)
song_limit = min(int(song_limit), len(files))
files = files[:song_limit]
return files
@staticmethod
def splitDataset(files, test_data_count):
test_files = None
test_frequency = Configuration.get('collection.test_frequency', default=0)
if not test_data_count:
test_data_count = Configuration.get('collection.test_data_count', default=0)
if test_data_count > 0:
test_data_count = int(test_data_count)
test_files = files[-test_data_count:]
files = files[:len(files) - test_data_count]
validation_ratio = Configuration.get('collection.validation_ratio', default=0.2)
validation_files = files[:int(len(files) * validation_ratio)]
training_files = files[len(validation_files):]
return training_files, validation_files, test_files
@staticmethod
def loadMultipleDatasets(folders, test_data_count):
datasets = []
ratio_sum = 0
smallest_dataset_length = None
smallest_dataset_ratio = None
for folder in folders:
ratio = folder['ratio']
dataset = DataLoader.loadFiles(folder['path'], True)
datasets.append((dataset, ratio, folder['path']))
ratio_sum = ratio_sum + ratio
dataset_length = len(dataset)
if smallest_dataset_length is None or dataset_length < smallest_dataset_length:
smallest_dataset_length = dataset_length
smallest_dataset_ratio = ratio
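        # The smallest dataset limits the total: scale the overall song count so that
        # every folder can contribute files in proportion to its configured ratio.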
target_song_count = ratio_sum / smallest_dataset_ratio * smallest_dataset_length
song_limit = Configuration.get('collection.song_limit', default=0)
if song_limit < target_song_count:
if song_limit >= 1:
target_song_count = song_limit
elif song_limit > 0:
target_song_count = target_song_count * song_limit
training_files = []
validation_files = []
test_files = []
for dataset, ratio, folder in datasets:
requested_file_count = int(ratio / ratio_sum * target_song_count)
files = dataset[:requested_file_count]
print('Loaded %s files from %s' % (len(files), folder))
training, validation, test = DataLoader.splitDataset(files, test_data_count)
training_files.extend(training)
validation_files.extend(validation)
if test is not None:
test_files.extend(test)
return training_files, validation_files, test_files
|
the-stack_0_11737 | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPybigwig(PythonPackage):
"""A package for accessing bigWig files using libBigWig."""
pypi = "pyBigWig/pyBigWig-0.3.4.tar.gz"
version('0.3.12', sha256='e01991790ece496bf6d3f00778dcfb136dd9ca0fd28acc1b3fb43051ad9b8403')
version('0.3.4', sha256='8c97a19218023190041c0e426f1544f7a4944a7bb4568faca1d85f1975af9ee2')
variant('numpy', default=True,
description='Enable support for numpy integers and vectors')
patch('python3_curl.patch', when='@:0.3.12 ^python@3:')
depends_on('curl', type=('build', 'link', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'), when='+numpy')
|
the-stack_0_11738 | import subprocess
import os
def arg():
try:
import sys
return sys.argv[1]
except:
None
inputfile = input("Enter the file to parse:")
outputfile = input("Enter the file to output to: ")
if os.path.exists("my_filters_001"):
os.chdir("my_filters_001")
subprocess.call(["git pull"],shell=True)
os.chdir("..")
else:
subprocess.call(["git clone https://github.com/iam-py-test/my_filters_001.git"],shell=True)
alt = open("my_filters_001/Alternative list formats/{}".format(outputfile),"w")
with open("my_filters_001/{}".format(inputfile)) as f:
lines = f.read().split("\n")
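    # Convert the filter list into a hosts-style list: skip "||" network rules,
    # rewrite "!" comments as "#" comments (unless --nocomment is passed),
    # and map plain domains to 127.0.0.1.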
for line in lines:
if line.startswith("||"):
continue
elif line.startswith("!"):
if arg() != "--nocomment":
alt.write(line.replace("!","#"))
alt.write("\n")
elif line != "":
alt.write("127.0.0.1 {}".format(line.split("$")[0]))
alt.write("\n")
alt.close()
os.chdir("my_filters_001")
subprocess.call(["git add ."],shell=True)
subprocess.call(["git commit -m \"[bot] add alt list\""],shell=True)
subprocess.call(["git push"],shell=True)
|
the-stack_0_11740 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
"""
import logging
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Optional
from typing import Sequence
from marathon.models.task import MarathonTask
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.check_services_replication_tools import main
from paasta_tools.long_running_service_tools import get_proxy_port_for_instance
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.smartstack_tools import MesosSmartstackEnvoyReplicationChecker
log = logging.getLogger(__name__)
def filter_healthy_marathon_instances_for_short_app_id(all_tasks, app_id):
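    """Count the tasks of the given short app id that Marathon reports healthy and
    that have been running for at least one minute."""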
tasks_for_app = [
task for task in all_tasks if task.app_id.startswith("/%s" % app_id)
]
one_minute_ago = datetime.now(timezone.utc) - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if (
marathon_tools.is_task_healthy(task, default_healthy=True)
and task.started_at is not None
and task.started_at < one_minute_ago
):
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(
instance_config, expected_count, all_tasks, dry_run=False,
):
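    """Fallback check against Marathon itself, used when the instance is not in
    smartstack: compare the healthy task count with the expected count and send a
    replication event if the instance is under-replicated."""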
app_id = format_job_id(instance_config.service, instance_config.instance)
num_healthy_tasks = filter_healthy_marathon_instances_for_short_app_id(
all_tasks=all_tasks, app_id=app_id
)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
monitoring_tools.send_replication_event_if_under_replication(
instance_config=instance_config,
expected_count=expected_count,
num_available=num_healthy_tasks,
dry_run=dry_run,
)
def check_service_replication(
instance_config: MarathonServiceConfig,
all_tasks_or_pods: Sequence[MarathonTask],
replication_checker: MesosSmartstackEnvoyReplicationChecker,
dry_run: bool = False,
) -> Optional[bool]:
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack/envoy or mesos)
:param instance_config: an instance of MarathonServiceConfig
:param replication_checker: an instance of MesosSmartstackEnvoyReplicationChecker
"""
expected_count = instance_config.get_instances()
log.info(
"Expecting %d total tasks for %s" % (expected_count, instance_config.job_id)
)
proxy_port = get_proxy_port_for_instance(instance_config)
registrations = instance_config.get_registrations()
# if the primary registration does not match the service_instance name then
# the best we can do is check marathon for replication (for now).
if proxy_port is not None and registrations[0] == instance_config.job_id:
is_well_replicated = monitoring_tools.check_replication_for_instance(
instance_config=instance_config,
expected_count=expected_count,
replication_checker=replication_checker,
dry_run=dry_run,
)
return is_well_replicated
else:
check_healthy_marathon_tasks_for_service_instance(
instance_config=instance_config,
expected_count=expected_count,
all_tasks=all_tasks_or_pods,
dry_run=dry_run,
)
return None
if __name__ == "__main__":
main(
instance_type_class=marathon_tools.MarathonServiceConfig,
check_service_replication=check_service_replication,
namespace=None, # not relevant for mesos
mesos=True,
)
|
the-stack_0_11743 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from aria.orchestrator.workflows.api import task
from aria.orchestrator.workflows.builtin.install import install
from tests import mock
from tests import storage
from . import assert_node_install_operations
@pytest.fixture
def ctx(tmpdir):
context = mock.context.simple(str(tmpdir),
topology=mock.topology.create_simple_topology_three_nodes)
yield context
storage.release_sqlite_storage(context.model)
def test_install(ctx):
install_tasks = list(task.WorkflowTask(install, ctx=ctx).topological_order(True))
assert len(install_tasks) == 3
dependency_node_subgraph1, dependency_node_subgraph2, dependent_node_subgraph = install_tasks
dependent_node_tasks = list(dependent_node_subgraph.topological_order(reverse=True))
dependency_node1_tasks = list(dependency_node_subgraph1.topological_order(reverse=True))
dependency_node2_tasks = list(dependency_node_subgraph2.topological_order(reverse=True))
assert_node_install_operations(dependency_node1_tasks)
assert_node_install_operations(dependency_node2_tasks)
assert_node_install_operations(dependent_node_tasks, relationships=2)
|
the-stack_0_11744 | from logging import Logger
from typing import Optional
from widgetastic.browser import Browser
from widgetastic.types import ViewParent
from widgetastic.utils import ParametrizedLocator
from widgetastic.widget.base import ClickableMixin
from widgetastic.widget.base import View
from widgetastic.widget.base import Widget
from widgetastic.xpath import quote
class OUIABase:
"""
Base class for ``OUIA`` support. According to the spec ``OUIA`` compatible components may
have the following attributes in the root level HTML element:
* data-ouia-component-type
* data-ouia-component-id
* data-ouia-safe
https://ouia.readthedocs.io/en/latest/README.html#ouia-component
"""
ROOT = ParametrizedLocator(
".//*[@data-ouia-component-type={@component_type}{@component_id_suffix}]"
)
browser: Browser
def _set_attrs(
self,
component_type: str,
component_id: Optional[str] = None,
) -> None:
self.component_type = quote(component_type)
self.component_id = quote(component_id)
component_id = f" and @data-ouia-component-id={quote(component_id)}" if component_id else ""
self.component_id_suffix = component_id
self.locator = self.ROOT.locator
@property
def is_safe(self) -> bool:
"""
An attribute called data-ouia-safe, which is True only when the component is in a static
state, i.e. no animations are occurring. At all other times, this value MUST be False.
"""
return "true" in self.browser.get_attribute("data-ouia-safe", self)
def __locator__(self) -> ParametrizedLocator:
return self.ROOT
def __repr__(self):
component_id_suffix = f"; ouia id: {self.component_id}" if self.component_id else ""
desc = f"ouia type: {self.component_type}{component_id_suffix}"
return f"<{type(self).__name__}; {desc}>"
class OUIAGenericView(OUIABase, View):
"""A base class for any OUIA compatible view.
Children classes must have the same name as the value of ``data-ouia-component-type`` attribute
of the root HTML element.
Args:
component_id: value of data-ouia-component-id attribute.
component_type: value of data-ouia-component-type attribute.
"""
OUIA_COMPONENT_TYPE: str
OUIA_ID: Optional[str]
def __init__(
self,
parent: ViewParent,
component_id: str = "",
logger: Optional[Logger] = None,
**kwargs,
) -> None:
component_type: Optional[str] = kwargs.pop("component_type", None)
self._set_attrs(
component_type=component_type or self.OUIA_COMPONENT_TYPE or type(self).__name__,
component_id=getattr(self, "OUIA_ID", component_id),
)
super().__init__(
parent=parent,
logger=logger,
**kwargs,
)
class OUIAGenericWidget(OUIABase, Widget, ClickableMixin):
"""A base class for any OUIA compatible widget.
Children classes must have the same name as the value of ``data-ouia-component-type`` attribute
of the root HTML element.
Args:
component_id: value of data-ouia-component-id attribute.
component_type: value of data-ouia-component-type attribute.
"""
OUIA_COMPONENT_TYPE: str
def __init__(
self,
parent: ViewParent,
component_id: Optional[str] = None,
logger: Optional[Logger] = None,
component_type: Optional[str] = None,
) -> None:
self._set_attrs(
component_type=component_type or self.OUIA_COMPONENT_TYPE or type(self).__name__,
component_id=component_id,
)
super().__init__(parent=parent, logger=logger)
|
the-stack_0_11749 | import os
import jinja2
import logging
from mkdocs import utils
from mkdocs.utils import filters
from mkdocs.config.base import ValidationError
log = logging.getLogger(__name__)
log.addFilter(utils.warning_filter)
class Theme:
"""
A Theme object.
Keywords:
name: The name of the theme as defined by its entrypoint.
custom_dir: User defined directory for custom templates.
static_templates: A list of templates to render as static pages.
All other keywords are passed as-is and made available as a key/value mapping.
"""
def __init__(self, name=None, **user_config):
self.name = name
self._vars = {}
# MkDocs provided static templates are always included
package_dir = os.path.abspath(os.path.dirname(__file__))
mkdocs_templates = os.path.join(package_dir, 'templates')
self.static_templates = set(os.listdir(mkdocs_templates))
# Build self.dirs from various sources in order of precedence
self.dirs = []
if 'custom_dir' in user_config:
self.dirs.append(user_config.pop('custom_dir'))
if self.name:
self._load_theme_config(name)
# Include templates provided directly by MkDocs (outside any theme)
self.dirs.append(mkdocs_templates)
# Handle remaining user configs. Override theme configs (if set)
self.static_templates.update(user_config.pop('static_templates', []))
self._vars.update(user_config)
def __repr__(self):
return "{}(name='{}', dirs={}, static_templates={}, {})".format(
self.__class__.__name__, self.name, self.dirs, list(self.static_templates),
', '.join('{}={}'.format(k, repr(v)) for k, v in self._vars.items())
)
def __getitem__(self, key):
return self._vars[key]
def __setitem__(self, key, value):
self._vars[key] = value
def __contains__(self, item):
return item in self._vars
def __iter__(self):
return iter(self._vars)
def _load_theme_config(self, name):
""" Recursively load theme and any parent themes. """
theme_dir = utils.get_theme_dir(name)
self.dirs.append(theme_dir)
try:
file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')
with open(file_path, 'rb') as f:
theme_config = utils.yaml_load(f)
if theme_config is None:
theme_config = {}
except OSError as e:
log.debug(e)
raise ValidationError(
"The theme '{}' does not appear to have a configuration file. "
"Please upgrade to a current version of the theme.".format(name)
)
log.debug("Loaded theme configuration for '%s' from '%s': %s", name, file_path, theme_config)
parent_theme = theme_config.pop('extends', None)
if parent_theme:
themes = utils.get_theme_names()
if parent_theme not in themes:
raise ValidationError(
"The theme '{}' inherits from '{}', which does not appear to be installed. "
"The available installed themes are: {}".format(name, parent_theme, ', '.join(themes))
)
self._load_theme_config(parent_theme)
self.static_templates.update(theme_config.pop('static_templates', []))
self._vars.update(theme_config)
def get_env(self):
""" Return a Jinja environment for the theme. """
loader = jinja2.FileSystemLoader(self.dirs)
# No autoreload because editing a template in the middle of a build is not useful.
env = jinja2.Environment(loader=loader, auto_reload=False)
env.filters['tojson'] = filters.tojson
env.filters['url'] = filters.url_filter
return env
|
the-stack_0_11750 | """Tools for processing Texas PUDF in conjunction with HCUP data
Texas does not participate in HCUP, but instead provides its own Inpatient Public Use Data Files (PUDF) for similar purposes.
More information on Texas Inpatient PUDF at http://www.dshs.state.tx.us/thcic/hospitals/Inpatientpudf.shtm.
"""
import os, re
def meta_from_txt(target):
"""Parses target text file containing Texas PUDF metadata and builds a pandas DataFrame object.
"""
pattern = '(?P<field_number>\d+\w?)\s+(?P<field>\S+)\s+(?:(?P<label>.*?)\s*)(?P<position>\d+)\s+(?P<width>\d+)\s+(?P<data_type>\w+)'
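    # Each matched definition row yields: field_number, field, label, position, width and data_type.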
joined = ' '.join([x for x in open(target)])
captured = [x.groupdict() for x in re.finditer(pattern, joined)]
#avoid circular imports
from pandas import DataFrame
meta = DataFrame(captured)
if len(meta[ meta.field == 'SPEC_UNIT_1' ]) == 1:
#we have individual spec unit cols, so drop any combined one
#otherwise this throws off the parsing of the real files
meta = meta[ meta.field != 'SPEC_UNIT' ]
# Occasionally there's a cute field with a hyphen in it. This breaks SQL since it's a disallowed character for SQL object names due to ambiguity with the subtraction operator.
meta.field = meta.field.map(lambda x: x.replace('-', '_'))
return meta
def get_meta(year, variety='base', split_base_portion=None):
"""Retrieves a meta DataFrame object for a given year of Texas PUDF data
split_base_portion should only be used for 2011 and 2012 base years, which are split into two chunks by Texas
"""
year = int(year)#sometimes this is passed a string and I'd rather it work anyways
varieties = ['base', 'charges', 'facility']
years = xrange(1999, 2013)
assert variety.lower() in varieties, "No Texas PUDF definitions available for variety %s" % variety
assert year in years, "No Texas PUDF definitions available for year %s" % year
    if not (year > 2010 and variety.lower() == 'base'):
filename = 'tx_pudf_%d_%s_definition.txt' % (year, variety.lower())
else:
assert split_base_portion in [1, 2], "For 2011 and 2012 base files, must specify which portion (1 or 2)"
filename = 'tx_pudf_%d_%s_definition_%d.txt' % (year, variety.lower(), split_base_portion)
from .hachoir import BUNDLED_LOADFILE_DIR
target = os.path.join(BUNDLED_LOADFILE_DIR, 'tx_pudf', filename)
return meta_from_txt(target)
def meta_augment(meta_df):
"""Akin to sas.meta_augment(), but for use with meta derived from Texas Inpatient Public Use Data Files.
"""
meta_df['length'] = meta_df['width']
meta_df['scale'] = meta_df['field'].map(lambda x: 2 if x.find('CHARGES') > -1 or x.find('AMOUNT') > -1 else 0)
return meta_df
|
the-stack_0_11753 | import json
import vidservers
from utils import gen_client, getLink, process_xpath
# -----------------------------------------------------------------------
def get_server_link(ep_number, server_id, episodes, servers, c):
client = gen_client(referer=f"{c['scheme']}{c['host']}")
sourceId = episodes[ep_number][server_id]
url = f"{c['scheme']}{c['host']}/ajax/anime/episode?id={sourceId}"
res = client.get(url).json()
encryptedURL = res['url']
server_link = getLink(encryptedURL)
return server_link
# -----------------------------------------------------------------------
def get_dl(server_link: str, server_id, servers):
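    # Look up the extractor function in vidservers named after the server (lowercased)
    # and call it with the server link.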
dl = getattr(vidservers, servers[server_id].lower())(server_link)
return dl
# -----------------------------------------------------------------------
def parse_servers(data: str):
# server_id [ {server_id: server_name},... ]
servers = process_xpath("//*[contains(@id, 'server')]", data)
server_id = {}
server_choices = []
server_lookup = {}
for server in servers:
server_name = server.text_content().strip()
id = server.get('data-id')
server_id[id] = server_name
server_choices.append(server_name)
server_lookup[server_name] = id
return server_id, server_choices, server_lookup
# -----------------------------------------------------------------------
def parse_episodes(data: str):
# [ ep_num: { server_id: 'episode_id',... },... ]
episodes_parsed = {}
episodes = process_xpath("//a[@data-sources]", data)
for ep in episodes:
episodes_parsed[ep.get('data-base')] = json.loads(ep.get('data-sources'))
return episodes_parsed
# -----------------------------------------------------------------------
|
the-stack_0_11754 | import logging
RANDOM_SEED = 20201234
import argparse
import openml
import os
import numpy as np
import string
import pandas as pd
import scipy
import math
OPENML_REGRESSION_LIST = [201, 1191, 215, 344, 537, 564, 1196, 1199, 1203, 1206,
5648, 23515, 41506, 41539, 42729, 42496]
NS_LIST = list(string.ascii_lowercase) + list(string.ascii_uppercase)
# NS_LIST = list(string.ascii_lowercase)[:10]
OML_target_attribute_dict = {
42236: 'pm2.5'
}
# from ..vw_benchmark.config import QW_OML_API_KEY, VW_DS_DIR
VW_DS_DIR = './test/vw/vw_benchmark/data/openml_vwdatasets/'
QW_OML_API_KEY = '8c4eebcda506ae1065902c2b224369b9'
#TODO: how to get these info from config.py
class OpenML2VWData:
VW_DS_DIR = VW_DS_DIR
def __init__(self, did, max_ns_num, task_type='regression'):
self._did = did
self._task_type = task_type
self._is_regression = False
self.vw_x_dic_list = []
self.Y = []
if 'regression' in self._task_type:
self._is_regression = True
self.vw_examples = self.load_vw_dataset(did, OpenML2VWData.VW_DS_DIR, self._is_regression, max_ns_num)
print( 'number of samples', len(self.vw_examples))
for i, e in enumerate(self.vw_examples):
self.Y.append(float(e.split('|')[0]))
print( self.Y[0:5])
logging.info('y label%s', self.Y[0:5])
@staticmethod
def load_vw_dataset(did, ds_dir, is_regression, max_ns_num):
import os
data_list = []
if is_regression:
fname = 'ds_{}_{}_{}.vw'.format(did, max_ns_num, 0) # the second field specifies the largest number of namespaces using.
vw_dataset_file = os.path.join(ds_dir, fname)
if not os.path.exists(vw_dataset_file) or os.stat(vw_dataset_file).st_size < 1000:
get_oml_to_vw(did, max_ns_num)
print(ds_dir, vw_dataset_file)
if not os.path.exists(ds_dir): os.makedirs(ds_dir)
with open(os.path.join(ds_dir, fname), 'r') as f:
vw_content = f.read().splitlines()
print(type(vw_content), len(vw_content))
return vw_content
# target # of ns: 10-26.
# TODO: split features into 10-26 ns:(1) look at the prefix (10<# of unique prefix< 26); (2) sequentially.
def oml_to_vw_no_grouping(X, y, ds_dir, fname):
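    """Write one VW example per sample with one namespace per feature, e.g.
    '<label> |a 0:v0|b 1:v1 ...' where the namespace letters come from NS_LIST."""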
print('no feature grouping')
with open(os.path.join(ds_dir, fname), 'w') as f:
if isinstance(X, pd.DataFrame):
for i in range(len(X)):
ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}:{:.6f}'.format(NS_LIST[j], j, val) for
j, val in enumerate(X.iloc[i].to_list()) ))
f.write(ns_line)
f.write('\n')
elif isinstance(X, np.ndarray):
for i in range(len(X)):
ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}:{:.6f}'.format(NS_LIST[j], j, val) for
j, val in enumerate(X[i]) ))
f.write(ns_line)
f.write('\n')
elif isinstance(X, scipy.sparse.csr_matrix):
print('NotImplementedError for sparse data')
NotImplementedError
def oml_to_vw_w_grouping(X, y, ds_dir, fname, orginal_dim, group_num, grouping_method='sequential'):
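    """Split the feature indexes into at most group_num namespaces (only sequential
    grouping is implemented) and write one VW example per sample with one namespace per group."""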
all_indexes = [i for i in range(orginal_dim)]
print('grouping', group_num)
# split all_indexes into # group_num of groups
# max_size_per_group = math.ceil(orginal_dim/float(group_num))
max_size_per_group = int(np.ceil(orginal_dim / float(group_num)))
# Option 1: sequential grouping
if grouping_method == 'sequential':
group_indexes = [] # lists of lists
print('indexes', group_num)
for i in range(group_num):
print('indexes', group_num, max_size_per_group)
indexes = [ind for ind in range(i*max_size_per_group, min( (i+1)*max_size_per_group, orginal_dim)) ]
print('indexes', group_num, indexes)
if len(indexes)>0: group_indexes.append(indexes)
print(group_indexes)
print(group_indexes)
else:
NotImplementedError
if group_indexes:
print('group_indexes')
with open(os.path.join(ds_dir, fname), 'w') as f:
if isinstance(X, pd.DataFrame):
raise NotImplementedError
elif isinstance(X, np.ndarray):
for i in range(len(X)):
# ns_content = '{} {}:{:.6f}'.format(NS_LIST[j], j, val) for j, val in enumerate(X[i])
NS_content = []
for zz in range(len(group_indexes)):
ns_features = ' '.join('{}:{:.6f}'.format(ind, X[i][ind]) for ind in group_indexes[zz])
NS_content.append(ns_features)
ns_line = '{} |{}'.format(str(y[i]), '|'.join('{} {}'.format(NS_LIST[j], NS_content[j]) for
j in range(len(group_indexes)) ))
f.write(ns_line)
f.write('\n')
elif isinstance(X, scipy.sparse.csr_matrix):
print('NotImplementedError for sparse data')
NotImplementedError
def save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression):
""" convert openml dataset to vw example and save to file
"""
print('is_regression',is_regression)
if is_regression:
fname = 'ds_{}_{}_{}.vw'.format(did, max_ns_num, 0)
print('dataset size', X.shape[0], X.shape[1])
print('saving data', did, ds_dir, fname)
dim = X.shape[1]
# do not do feature grouping
from os import path
# if not path.exists(os.path.join(ds_dir, fname)):
# TODO: remove no_grouping code
if dim < max_ns_num:
oml_to_vw_no_grouping(X, y, ds_dir, fname)
else:
oml_to_vw_w_grouping(X, y, ds_dir, fname, dim, group_num=max_ns_num)
def shuffle_data(X, y, seed):
try:
n = len(X)
except:
n = X.getnnz()
perm = np.random.RandomState(seed=seed).permutation(n)
X_shuf = X[perm, :]
y_shuf = y[perm]
return X_shuf, y_shuf
def get_oml_to_vw(did, max_ns_num, ds_dir=VW_DS_DIR):
success = False
print('-----getting oml dataset-------', did)
ds = openml.datasets.get_dataset(did)
target_attribute = ds.default_target_attribute
if target_attribute is None and did in OML_target_attribute_dict:
target_attribute = OML_target_attribute_dict[did]
print('target=ds.default_target_attribute', target_attribute)
data = ds.get_data(target=target_attribute, dataset_format='array')
X, y = data[0], data[1] # return X: pd DataFrame, y: pd series
import scipy
if scipy.sparse.issparse(X):
X = scipy.sparse.csr_matrix.toarray(X)
print('is sparse matrix')
if data and isinstance(X, np.ndarray):
print('-----converting oml to vw and and saving oml dataset-------')
save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression=True)
success = True
else:
print('---failed to convert/save oml dataset to vw!!!----')
try:
X, y = data[0], data[1] # return X: pd DataFrame, y: pd series
if data and isinstance(X, np.ndarray):
print('-----converting oml to vw and and saving oml dataset-------')
save_vw_dataset_w_ns(X, y, did, ds_dir, max_ns_num, is_regression = True)
success = True
else:
print('---failed to convert/save oml dataset to vw!!!----')
except:
print('-------------failed to get oml dataset!!!', did)
return success
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='openML to vw converter')
parser.add_argument('-dataset', type=int, default=None, help='dataset id')
parser.add_argument('-ns_num', '--ns_num', metavar='ns_num', type = int,
default=10, help="max name space number")
parser.add_argument('-min_sample_size', type=int, default=10000, help='minimum sample size')
parser.add_argument('-max_sample_size', type=int, default=None, help='maximum sample size')
args = parser.parse_args()
openml.config.apikey = QW_OML_API_KEY
openml.config.set_cache_directory('./data/omlcache/')
print('loaded openML')
if not os.path.exists(VW_DS_DIR): os.makedirs(VW_DS_DIR)
if args.dataset is not None:
dids = [args.dataset]
else:
if args.min_sample_size >=10000 and args.max_sample_size is None:
dids = OPENML_REGRESSION_LIST
failed_datasets = []
for did in sorted(dids):
print('processing did', did)
print('getting data,', did)
success = get_oml_to_vw(did, args.ns_num)
if not success:
failed_datasets.append(did)
print('-----------failed datasets', failed_datasets)
## command line:
# python openml_data_helper.py -min_sample_size 10000
# failed datasets [1414, 5572, 40753, 41463, 42080, 42092, 42125, 42130, 42131, 42160, 42183, 42207,
# 42208, 42362, 42367, 42464, 42559, 42635, 42672, 42673, 42677, 42688, 42720, 42721, 42726, 42728, 42729, 42731] |
the-stack_0_11755 | from rlalgos.pytorch.mf import dqn as dqn_pytorch, sac as sac_pytorch, td3 as td3_pytorch, \
categorical_dqn as c51_pytorch, qr_dqn as qr_dqn_pytorch
from rlalgos.pytorch.mf.atari import categorical_dqn as c51_pytorch, dqn as atari_dqn_pytorch, \
qr_dqn as atari_qr_dqn_pytorch
from rlalgos.pytorch.offline import cql as cql_pytorch
from rlutils.infra.runner import get_argparser_from_func
# from rlutils.tf.algos.mb import pets
# from rlutils.tf.algos.mf import td3, ppo, trpo, sac, ddpg, dqn
# from rlutils.tf.algos.offline import cql, plas
__tf__ = ['ppo', 'td3', 'trpo', 'sac', 'ddpg', 'cql', 'plas', 'dqn', 'pets']
__all__ = ['sac_pytorch', 'td3_pytorch', 'atari_dqn_pytorch', 'dqn_pytorch', 'cql_pytorch', 'c51_pytorch',
'c51_pytorch', 'qr_dqn_pytorch', 'atari_qr_dqn_pytorch']
def main():
import argparse
parser = argparse.ArgumentParser('Running rl algorithms', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
algorithm_parsers = parser.add_subparsers(title='algorithm', help='algorithm specific parser', dest='algo')
for algo in __all__:
algo_parser = algorithm_parsers.add_parser(algo, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
get_argparser_from_func(eval(f'{algo}.Runner.main'), algo_parser)
kwargs = vars(parser.parse_args())
algo = kwargs.pop('algo')
eval(f'{algo}.Runner.main')(**kwargs)
if __name__ == '__main__':
main()
|
the-stack_0_11758 | import os
import random
import numpy as np
import vec_noise
from PIL import Image
from tqdm import tqdm
WORLD_SIZE = [2000, 2000, 3]
WORKING_DIR = os.getcwd()
DATA_DIR = WORKING_DIR[:-8]
os.system("cls")
# +------------------------------------------------------------+
# | Made by Jonáš Erlebach |
# | Thanks to third party libraries from https://pypi.org/ |
# +------------------------------------------------------------+
class WorldGeneration:
def __init__(self, DATA_DIR):
self.DATA_DIR = DATA_DIR
self.NOISE_SCALE = 0.002 # def 0.002
self.octaves_devider = 1
def CreateImage(self):
x = [[[0, 0, 0] for x in range(WORLD_SIZE[0])] for _y in range(WORLD_SIZE[1])]
startx, starty = random.randint(0, 50000), random.randint(0, 50000)
for x_ in tqdm(range(WORLD_SIZE[0])):
for y in range(WORLD_SIZE[1]):
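                # 2-D simplex noise, roughly in [-1, 1]; the third argument is the octave count.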
value = vec_noise.snoise2(startx + x_ * self.NOISE_SCALE, starty + y * self.NOISE_SCALE,
12 // self.octaves_devider)
if value < -0.45:
x[x_][y][0] = 128
x[x_][y][1] = 197
x[x_][y][2] = 222
continue
if value < -0.35:
x[x_][y][0] = 248
x[x_][y][1] = 240
x[x_][y][2] = 164
continue
if value < 0.35:
x[x_][y][0] = 126
x[x_][y][1] = 200
x[x_][y][2] = 80
continue
if value < 0.53:
x[x_][y][0] = 200
x[x_][y][1] = 200
x[x_][y][2] = 200
continue
else:
x[x_][y][0] = 255
x[x_][y][1] = 255
x[x_][y][2] = 255
continue
self.to_image(x)
def to_image(self, array):
print("Creating Image")
array = np.array(array).astype(np.uint8)
img = Image.fromarray(array)
img.save(self.DATA_DIR + "\\Maps\\BG.png")
print("Image Created")
if __name__ == '__main__':
WorldGeneration(DATA_DIR).CreateImage()
|
the-stack_0_11761 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import sys
import time
import traceback
import json
import certifi
import urllib.parse
import aiohttp
try:
from . import paymentrequest_pb2 as pb2
except ImportError:
sys.exit("Error: could not find paymentrequest_pb2.py. Create it with 'protoc --proto_path=electrum_audax/ --python_out=electrum_audax/ electrum_audax/paymentrequest.proto'")
from . import bitcoin, ecc, util, transaction, x509, rsakey
from .util import bh2u, bfh, export_meta, import_meta, make_aiohttp_session
from .crypto import sha256
from .bitcoin import TYPE_ADDRESS
from .transaction import TxOutput
from .network import Network
from .logging import get_logger, Logger
_logger = get_logger(__name__)
REQUEST_HEADERS = {'Accept': 'application/audax-paymentrequest', 'User-Agent': 'Electrum'}
ACK_HEADERS = {'Content-Type':'application/audax-payment','Accept':'application/audax-paymentack','User-Agent':'Electrum'}
ca_path = certifi.where()
ca_list = None
ca_keyID = None
def load_ca_list():
global ca_list, ca_keyID
if ca_list is None:
ca_list, ca_keyID = x509.load_certificates(ca_path)
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3 # send and propagated
async def get_payment_request(url: str) -> 'PaymentRequest':
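    """Fetch a BIP70 payment request from an http(s) URL or a local file:// path;
    any failure is recorded as the error of the returned PaymentRequest."""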
u = urllib.parse.urlparse(url)
error = None
if u.scheme in ('http', 'https'):
resp_content = None
try:
proxy = Network.get_instance().proxy
async with make_aiohttp_session(proxy, headers=REQUEST_HEADERS) as session:
async with session.get(url) as response:
resp_content = await response.read()
response.raise_for_status()
# Guard against `audax:`-URIs with invalid payment request URLs
if "Content-Type" not in response.headers \
or response.headers["Content-Type"] != "application/audax-paymentrequest":
data = None
error = "payment URL not pointing to a payment request handling server"
else:
data = resp_content
data_len = len(data) if data is not None else None
_logger.info(f'fetched payment request {url} {data_len}')
except aiohttp.ClientError as e:
error = f"Error while contacting payment URL:\n{repr(e)}"
if isinstance(e, aiohttp.ClientResponseError) and e.status == 400 and resp_content:
error += "\n" + resp_content.decode("utf8")
data = None
elif u.scheme == 'file':
try:
with open(u.path, 'r', encoding='utf-8') as f:
data = f.read()
except IOError:
data = None
error = "payment URL not pointing to a valid file"
else:
data = None
error = f"Unknown scheme for payment request. URL: {url}"
pr = PaymentRequest(data, error)
return pr
class PaymentRequest:
def __init__(self, data, error=None):
self.raw = data
self.error = error
self.parse(data)
self.requestor = None # known after verify
self.tx = None
def __str__(self):
return str(self.raw)
def parse(self, r):
if self.error:
return
self.id = bh2u(sha256(r)[0:16])
try:
self.data = pb2.PaymentRequest()
self.data.ParseFromString(r)
except:
self.error = "cannot parse payment request"
return
self.details = pb2.PaymentDetails()
self.details.ParseFromString(self.data.serialized_payment_details)
self.outputs = []
for o in self.details.outputs:
type_, addr = transaction.get_address_from_output_script(o.script)
if type_ != TYPE_ADDRESS:
# TODO maybe rm restriction but then get_requestor and get_id need changes
self.error = "only addresses are allowed as outputs"
return
self.outputs.append(TxOutput(type_, addr, o.amount))
self.memo = self.details.memo
self.payment_url = self.details.payment_url
def is_pr(self):
return self.get_amount() != 0
#return self.get_outputs() != [(TYPE_ADDRESS, self.get_requestor(), self.get_amount())]
def verify(self, contacts):
if self.error:
return False
if not self.raw:
self.error = "Empty request"
return False
pr = pb2.PaymentRequest()
try:
pr.ParseFromString(self.raw)
except:
self.error = "Error: Cannot parse payment request"
return False
if not pr.signature:
# the address will be displayed as requestor
self.requestor = None
return True
if pr.pki_type in ["x509+sha256", "x509+sha1"]:
return self.verify_x509(pr)
elif pr.pki_type in ["dnssec+audax", "dnssec+ecdsa"]:
return self.verify_dnssec(pr, contacts)
else:
self.error = "ERROR: Unsupported PKI Type for Message Signature"
return False
def verify_x509(self, paymntreq):
load_ca_list()
if not ca_list:
self.error = "Trusted certificate authorities list not found"
return False
cert = pb2.X509Certificates()
cert.ParseFromString(paymntreq.pki_data)
# verify the chain of certificates
try:
x, ca = verify_cert_chain(cert.certificate)
except BaseException as e:
_logger.exception('')
self.error = str(e)
return False
# get requestor name
self.requestor = x.get_common_name()
if self.requestor.startswith('*.'):
self.requestor = self.requestor[2:]
# verify the BIP70 signature
pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
sig = paymntreq.signature
paymntreq.signature = b''
s = paymntreq.SerializeToString()
sigBytes = bytearray(sig)
msgBytes = bytearray(s)
if paymntreq.pki_type == "x509+sha256":
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
verify = pubkey0.verify(sigBytes, x509.PREFIX_RSA_SHA256 + hashBytes)
elif paymntreq.pki_type == "x509+sha1":
verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
else:
self.error = f"ERROR: unknown pki_type {paymntreq.pki_type} in Payment Request"
return False
if not verify:
self.error = "ERROR: Invalid Signature for Payment Request Data"
return False
### SIG Verified
self.error = 'Signed by Trusted CA: ' + ca.get_common_name()
return True
def verify_dnssec(self, pr, contacts):
sig = pr.signature
alias = pr.pki_data
info = contacts.resolve(alias)
if info.get('validated') is not True:
self.error = "Alias verification failed (DNSSEC)"
return False
if pr.pki_type == "dnssec+audax":
self.requestor = alias
address = info.get('address')
pr.signature = b''
message = pr.SerializeToString()
if ecc.verify_message_with_address(address, sig, message):
self.error = 'Verified with DNSSEC'
return True
else:
self.error = "verify failed"
return False
else:
self.error = "unknown algo"
return False
def has_expired(self):
return self.details.expires and self.details.expires < int(time.time())
def get_expiration_date(self):
return self.details.expires
def get_amount(self):
return sum(map(lambda x:x[2], self.outputs))
def get_address(self):
o = self.outputs[0]
assert o.type == TYPE_ADDRESS
return o.address
def get_requestor(self):
return self.requestor if self.requestor else self.get_address()
def get_verify_status(self):
return self.error if self.requestor else "No Signature"
def get_memo(self):
return self.memo
def get_dict(self):
return {
'requestor': self.get_requestor(),
'memo':self.get_memo(),
'exp': self.get_expiration_date(),
'amount': self.get_amount(),
'signature': self.get_verify_status(),
'txid': self.tx,
'outputs': self.get_outputs()
}
def get_id(self):
return self.id if self.requestor else self.get_address()
def get_outputs(self):
return self.outputs[:]
async def send_payment_and_receive_paymentack(self, raw_tx, refund_addr):
pay_det = self.details
if not self.details.payment_url:
return False, "no url"
paymnt = pb2.Payment()
paymnt.merchant_data = pay_det.merchant_data
paymnt.transactions.append(bfh(raw_tx))
ref_out = paymnt.refund_to.add()
ref_out.script = util.bfh(transaction.Transaction.pay_script(TYPE_ADDRESS, refund_addr))
paymnt.memo = "Paid using Electrum"
pm = paymnt.SerializeToString()
payurl = urllib.parse.urlparse(pay_det.payment_url)
resp_content = None
try:
proxy = Network.get_instance().proxy
async with make_aiohttp_session(proxy, headers=ACK_HEADERS) as session:
async with session.post(payurl.geturl(), data=pm) as response:
resp_content = await response.read()
response.raise_for_status()
try:
paymntack = pb2.PaymentACK()
paymntack.ParseFromString(resp_content)
except Exception:
return False, "PaymentACK could not be processed. Payment was sent; please manually verify that payment was received."
print(f"PaymentACK message received: {paymntack.memo}")
return True, paymntack.memo
except aiohttp.ClientError as e:
error = f"Payment Message/PaymentACK Failed:\n{repr(e)}"
if isinstance(e, aiohttp.ClientResponseError) and e.status == 400 and resp_content:
error += "\n" + resp_content.decode("utf8")
return False, error
def make_unsigned_request(req):
from .transaction import Transaction
addr = req['address']
time = req.get('time', 0)
exp = req.get('exp', 0)
if time and type(time) != int:
time = 0
if exp and type(exp) != int:
exp = 0
amount = req['amount']
if amount is None:
amount = 0
memo = req['memo']
script = bfh(Transaction.pay_script(TYPE_ADDRESS, addr))
outputs = [(script, amount)]
pd = pb2.PaymentDetails()
for script, amount in outputs:
pd.outputs.add(amount=amount, script=script)
pd.time = time
pd.expires = time + exp if exp else 0
pd.memo = memo
pr = pb2.PaymentRequest()
pr.serialized_payment_details = pd.SerializeToString()
pr.signature = util.to_bytes('')
return pr
def sign_request_with_alias(pr, alias, alias_privkey):
pr.pki_type = 'dnssec+audax'
pr.pki_data = str(alias)
message = pr.SerializeToString()
ec_key = ecc.ECPrivkey(alias_privkey)
compressed = bitcoin.is_compressed_privkey(alias_privkey)
pr.signature = ec_key.sign_message(message, compressed)
def verify_cert_chain(chain):
""" Verify a chain of certificates. The last certificate is the CA"""
load_ca_list()
# parse the chain
cert_num = len(chain)
x509_chain = []
for i in range(cert_num):
x = x509.X509(bytearray(chain[i]))
x509_chain.append(x)
if i == 0:
x.check_date()
else:
if not x.check_ca():
raise Exception("ERROR: Supplied CA Certificate Error")
if not cert_num > 1:
raise Exception("ERROR: CA Certificate Chain Not Provided by Payment Processor")
# if the root CA is not supplied, add it to the chain
ca = x509_chain[cert_num-1]
if ca.getFingerprint() not in ca_list:
keyID = ca.get_issuer_keyID()
f = ca_keyID.get(keyID)
if f:
root = ca_list[f]
x509_chain.append(root)
else:
raise Exception("Supplied CA Not Found in Trusted CA Store.")
# verify the chain of signatures
cert_num = len(x509_chain)
for i in range(1, cert_num):
x = x509_chain[i]
prev_x = x509_chain[i-1]
algo, sig, data = prev_x.get_signature()
sig = bytearray(sig)
pubkey = rsakey.RSAKey(x.modulus, x.exponent)
if algo == x509.ALGO_RSA_SHA1:
verify = pubkey.hashAndVerify(sig, data)
elif algo == x509.ALGO_RSA_SHA256:
hashBytes = bytearray(hashlib.sha256(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA256 + hashBytes)
elif algo == x509.ALGO_RSA_SHA384:
hashBytes = bytearray(hashlib.sha384(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA384 + hashBytes)
elif algo == x509.ALGO_RSA_SHA512:
hashBytes = bytearray(hashlib.sha512(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA512 + hashBytes)
else:
raise Exception("Algorithm not supported: {}".format(algo))
if not verify:
raise Exception("Certificate not Signed by Provided CA Certificate Chain")
return x509_chain[0], ca
def check_ssl_config(config):
from . import pem
key_path = config.get('ssl_privkey')
cert_path = config.get('ssl_chain')
with open(key_path, 'r', encoding='utf-8') as f:
params = pem.parse_private_key(f.read())
with open(cert_path, 'r', encoding='utf-8') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
# verify chain
x, ca = verify_cert_chain(bList)
# verify that privkey and pubkey match
privkey = rsakey.RSAKey(*params)
pubkey = rsakey.RSAKey(x.modulus, x.exponent)
assert x.modulus == params[0]
assert x.exponent == params[1]
# return requestor
requestor = x.get_common_name()
if requestor.startswith('*.'):
requestor = requestor[2:]
return requestor
def sign_request_with_x509(pr, key_path, cert_path):
from . import pem
with open(key_path, 'r', encoding='utf-8') as f:
params = pem.parse_private_key(f.read())
privkey = rsakey.RSAKey(*params)
with open(cert_path, 'r', encoding='utf-8') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
certificates = pb2.X509Certificates()
certificates.certificate.extend(map(bytes, bList))
pr.pki_type = 'x509+sha256'
pr.pki_data = certificates.SerializeToString()
msgBytes = bytearray(pr.SerializeToString())
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
sig = privkey.sign(x509.PREFIX_RSA_SHA256 + hashBytes)
pr.signature = bytes(sig)
def serialize_request(req):
pr = make_unsigned_request(req)
signature = req.get('sig')
requestor = req.get('name')
if requestor and signature:
pr.signature = bfh(signature)
pr.pki_type = 'dnssec+audax'
pr.pki_data = str(requestor)
return pr
def make_request(config, req):
pr = make_unsigned_request(req)
key_path = config.get('ssl_privkey')
cert_path = config.get('ssl_chain')
if key_path and cert_path:
sign_request_with_x509(pr, key_path, cert_path)
return pr
class InvoiceStore(Logger):
def __init__(self, storage):
Logger.__init__(self)
self.storage = storage
self.invoices = {}
self.paid = {}
d = self.storage.get('invoices', {})
self.load(d)
def set_paid(self, pr, txid):
pr.tx = txid
pr_id = pr.get_id()
self.paid[txid] = pr_id
if pr_id not in self.invoices:
# in case the user had deleted it previously
self.add(pr)
def load(self, d):
for k, v in d.items():
try:
pr = PaymentRequest(bfh(v.get('hex')))
pr.tx = v.get('txid')
pr.requestor = v.get('requestor')
self.invoices[k] = pr
if pr.tx:
self.paid[pr.tx] = k
            except Exception:
                # skip malformed entries rather than failing the whole load
                continue
def import_file(self, path):
def validate(data):
return data # TODO
import_meta(path, validate, self.on_import)
def on_import(self, data):
self.load(data)
self.save()
def export_file(self, filename):
export_meta(self.dump(), filename)
def dump(self):
d = {}
for k, pr in self.invoices.items():
d[k] = {
'hex': bh2u(pr.raw),
'requestor': pr.requestor,
'txid': pr.tx
}
return d
def save(self):
self.storage.put('invoices', self.dump())
def get_status(self, key):
pr = self.get(key)
if pr is None:
self.logger.info(f"get_status() can't find pr for {key}")
return
if pr.tx is not None:
return PR_PAID
if pr.has_expired():
return PR_EXPIRED
return PR_UNPAID
def add(self, pr):
key = pr.get_id()
self.invoices[key] = pr
self.save()
return key
def remove(self, key):
self.invoices.pop(key)
self.save()
def get(self, k):
return self.invoices.get(k)
def sorted_list(self):
# sort
return self.invoices.values()
def unpaid_invoices(self):
return [self.invoices[k] for k in
filter(lambda x: self.get_status(x) not in (PR_PAID, None),
self.invoices.keys())
]
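# Illustrative sketch (not used by the module): the typical lifecycle of an invoice in
# the store above. ``storage``, ``pr`` and ``txid`` are placeholders for a wallet
# storage object, a PaymentRequest and a transaction id.
def _invoice_store_example(storage, pr, txid):
    store = InvoiceStore(storage)
    key = store.add(pr)                 # persists via storage.put('invoices', ...)
    status = store.get_status(key)      # PR_UNPAID, PR_EXPIRED or PR_PAID
    store.set_paid(pr, txid)            # re-adds the request if it had been deleted
    return key, status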
|
the-stack_0_11764 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('example', '0002_relatedsubscription'),
]
operations = [
migrations.CreateModel(
name='Summary',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('subscriptions', models.ManyToManyField(related_name='summaries', to='example.StockSubscription')),
],
),
]
|
the-stack_0_11765 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import entity_type
from google.cloud.aiplatform_v1beta1.types import entity_type as gca_entity_type
from google.cloud.aiplatform_v1beta1.types import feature
from google.cloud.aiplatform_v1beta1.types import feature as gca_feature
from google.cloud.aiplatform_v1beta1.types import featurestore
from google.cloud.aiplatform_v1beta1.types import featurestore_service
from google.longrunning import operations_pb2 # type: ignore
from .base import FeaturestoreServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import FeaturestoreServiceGrpcTransport
class FeaturestoreServiceGrpcAsyncIOTransport(FeaturestoreServiceTransport):
"""gRPC AsyncIO backend transport for FeaturestoreService.
The service that handles CRUD and List for resources for
Featurestore.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def create_featurestore(
self,
) -> Callable[
[featurestore_service.CreateFeaturestoreRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create featurestore method over gRPC.
Creates a new Featurestore in a given project and
location.
Returns:
Callable[[~.CreateFeaturestoreRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_featurestore" not in self._stubs:
self._stubs["create_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeaturestore",
request_serializer=featurestore_service.CreateFeaturestoreRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_featurestore"]
@property
def get_featurestore(
self,
) -> Callable[
[featurestore_service.GetFeaturestoreRequest],
Awaitable[featurestore.Featurestore],
]:
r"""Return a callable for the get featurestore method over gRPC.
Gets details of a single Featurestore.
Returns:
Callable[[~.GetFeaturestoreRequest],
Awaitable[~.Featurestore]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_featurestore" not in self._stubs:
self._stubs["get_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeaturestore",
request_serializer=featurestore_service.GetFeaturestoreRequest.serialize,
response_deserializer=featurestore.Featurestore.deserialize,
)
return self._stubs["get_featurestore"]
@property
def list_featurestores(
self,
) -> Callable[
[featurestore_service.ListFeaturestoresRequest],
Awaitable[featurestore_service.ListFeaturestoresResponse],
]:
r"""Return a callable for the list featurestores method over gRPC.
Lists Featurestores in a given project and location.
Returns:
Callable[[~.ListFeaturestoresRequest],
Awaitable[~.ListFeaturestoresResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_featurestores" not in self._stubs:
self._stubs["list_featurestores"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeaturestores",
request_serializer=featurestore_service.ListFeaturestoresRequest.serialize,
response_deserializer=featurestore_service.ListFeaturestoresResponse.deserialize,
)
return self._stubs["list_featurestores"]
@property
def update_featurestore(
self,
) -> Callable[
[featurestore_service.UpdateFeaturestoreRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the update featurestore method over gRPC.
Updates the parameters of a single Featurestore.
Returns:
Callable[[~.UpdateFeaturestoreRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_featurestore" not in self._stubs:
self._stubs["update_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeaturestore",
request_serializer=featurestore_service.UpdateFeaturestoreRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_featurestore"]
@property
def delete_featurestore(
self,
) -> Callable[
[featurestore_service.DeleteFeaturestoreRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete featurestore method over gRPC.
Deletes a single Featurestore. The Featurestore must not contain
any EntityTypes or ``force`` must be set to true for the request
to succeed.
Returns:
Callable[[~.DeleteFeaturestoreRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_featurestore" not in self._stubs:
self._stubs["delete_featurestore"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeaturestore",
request_serializer=featurestore_service.DeleteFeaturestoreRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_featurestore"]
@property
def create_entity_type(
self,
) -> Callable[
[featurestore_service.CreateEntityTypeRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the create entity type method over gRPC.
Creates a new EntityType in a given Featurestore.
Returns:
Callable[[~.CreateEntityTypeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_entity_type" not in self._stubs:
self._stubs["create_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateEntityType",
request_serializer=featurestore_service.CreateEntityTypeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_entity_type"]
@property
def get_entity_type(
self,
) -> Callable[
[featurestore_service.GetEntityTypeRequest], Awaitable[entity_type.EntityType]
]:
r"""Return a callable for the get entity type method over gRPC.
Gets details of a single EntityType.
Returns:
Callable[[~.GetEntityTypeRequest],
Awaitable[~.EntityType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_entity_type" not in self._stubs:
self._stubs["get_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetEntityType",
request_serializer=featurestore_service.GetEntityTypeRequest.serialize,
response_deserializer=entity_type.EntityType.deserialize,
)
return self._stubs["get_entity_type"]
@property
def list_entity_types(
self,
) -> Callable[
[featurestore_service.ListEntityTypesRequest],
Awaitable[featurestore_service.ListEntityTypesResponse],
]:
r"""Return a callable for the list entity types method over gRPC.
Lists EntityTypes in a given Featurestore.
Returns:
Callable[[~.ListEntityTypesRequest],
Awaitable[~.ListEntityTypesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_entity_types" not in self._stubs:
self._stubs["list_entity_types"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes",
request_serializer=featurestore_service.ListEntityTypesRequest.serialize,
response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize,
)
return self._stubs["list_entity_types"]
@property
def update_entity_type(
self,
) -> Callable[
[featurestore_service.UpdateEntityTypeRequest],
Awaitable[gca_entity_type.EntityType],
]:
r"""Return a callable for the update entity type method over gRPC.
Updates the parameters of a single EntityType.
Returns:
Callable[[~.UpdateEntityTypeRequest],
Awaitable[~.EntityType]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_entity_type" not in self._stubs:
self._stubs["update_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateEntityType",
request_serializer=featurestore_service.UpdateEntityTypeRequest.serialize,
response_deserializer=gca_entity_type.EntityType.deserialize,
)
return self._stubs["update_entity_type"]
@property
def delete_entity_type(
self,
) -> Callable[
[featurestore_service.DeleteEntityTypeRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the delete entity type method over gRPC.
Deletes a single EntityType. The EntityType must not have any
Features or ``force`` must be set to true for the request to
succeed.
Returns:
Callable[[~.DeleteEntityTypeRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_entity_type" not in self._stubs:
self._stubs["delete_entity_type"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteEntityType",
request_serializer=featurestore_service.DeleteEntityTypeRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_entity_type"]
@property
def create_feature(
self,
) -> Callable[
[featurestore_service.CreateFeatureRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create feature method over gRPC.
Creates a new Feature in a given EntityType.
Returns:
Callable[[~.CreateFeatureRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_feature" not in self._stubs:
self._stubs["create_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/CreateFeature",
request_serializer=featurestore_service.CreateFeatureRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_feature"]
@property
def batch_create_features(
self,
) -> Callable[
[featurestore_service.BatchCreateFeaturesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch create features method over gRPC.
Creates a batch of Features in a given EntityType.
Returns:
Callable[[~.BatchCreateFeaturesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_create_features" not in self._stubs:
self._stubs["batch_create_features"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchCreateFeatures",
request_serializer=featurestore_service.BatchCreateFeaturesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_create_features"]
@property
def get_feature(
self,
) -> Callable[[featurestore_service.GetFeatureRequest], Awaitable[feature.Feature]]:
r"""Return a callable for the get feature method over gRPC.
Gets details of a single Feature.
Returns:
Callable[[~.GetFeatureRequest],
Awaitable[~.Feature]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_feature" not in self._stubs:
self._stubs["get_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/GetFeature",
request_serializer=featurestore_service.GetFeatureRequest.serialize,
response_deserializer=feature.Feature.deserialize,
)
return self._stubs["get_feature"]
@property
def list_features(
self,
) -> Callable[
[featurestore_service.ListFeaturesRequest],
Awaitable[featurestore_service.ListFeaturesResponse],
]:
r"""Return a callable for the list features method over gRPC.
Lists Features in a given EntityType.
Returns:
Callable[[~.ListFeaturesRequest],
Awaitable[~.ListFeaturesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_features" not in self._stubs:
self._stubs["list_features"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListFeatures",
request_serializer=featurestore_service.ListFeaturesRequest.serialize,
response_deserializer=featurestore_service.ListFeaturesResponse.deserialize,
)
return self._stubs["list_features"]
@property
def update_feature(
self,
) -> Callable[
[featurestore_service.UpdateFeatureRequest], Awaitable[gca_feature.Feature]
]:
r"""Return a callable for the update feature method over gRPC.
Updates the parameters of a single Feature.
Returns:
Callable[[~.UpdateFeatureRequest],
Awaitable[~.Feature]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_feature" not in self._stubs:
self._stubs["update_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/UpdateFeature",
request_serializer=featurestore_service.UpdateFeatureRequest.serialize,
response_deserializer=gca_feature.Feature.deserialize,
)
return self._stubs["update_feature"]
@property
def delete_feature(
self,
) -> Callable[
[featurestore_service.DeleteFeatureRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete feature method over gRPC.
Deletes a single Feature.
Returns:
Callable[[~.DeleteFeatureRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_feature" not in self._stubs:
self._stubs["delete_feature"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/DeleteFeature",
request_serializer=featurestore_service.DeleteFeatureRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_feature"]
@property
def import_feature_values(
self,
) -> Callable[
[featurestore_service.ImportFeatureValuesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the import feature values method over gRPC.
Imports Feature values into the Featurestore from a
source storage.
The progress of the import is tracked by the returned
operation. The imported features are guaranteed to be
visible to subsequent read operations after the
operation is marked as successfully done.
If an import operation fails, the Feature values
returned from reads and exports may be inconsistent. If
consistency is required, the caller must retry the same
import request again and wait till the new operation
returned is marked as successfully done.
There are also scenarios where the caller can cause
inconsistency.
- Source data for import contains multiple distinct
Feature values for the same entity ID and timestamp.
- Source is modified during an import. This includes
adding, updating, or removing source data and/or
metadata. Examples of updating metadata include but are
not limited to changing storage location, storage class,
or retention policy.
- Online serving cluster is under-provisioned.
Returns:
Callable[[~.ImportFeatureValuesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_feature_values" not in self._stubs:
self._stubs["import_feature_values"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ImportFeatureValues",
request_serializer=featurestore_service.ImportFeatureValuesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_feature_values"]
@property
def batch_read_feature_values(
self,
) -> Callable[
[featurestore_service.BatchReadFeatureValuesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch read feature values method over gRPC.
Batch reads Feature values from a Featurestore.
This API enables batch reading Feature values, where
each read instance in the batch may read Feature values
of entities from one or more EntityTypes. Point-in-time
correctness is guaranteed for Feature values of each
read instance as of each instance's read timestamp.
Returns:
Callable[[~.BatchReadFeatureValuesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_read_feature_values" not in self._stubs:
self._stubs["batch_read_feature_values"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/BatchReadFeatureValues",
request_serializer=featurestore_service.BatchReadFeatureValuesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_read_feature_values"]
@property
def export_feature_values(
self,
) -> Callable[
[featurestore_service.ExportFeatureValuesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the export feature values method over gRPC.
Exports Feature values from all the entities of a
target EntityType.
Returns:
Callable[[~.ExportFeatureValuesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_feature_values" not in self._stubs:
self._stubs["export_feature_values"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ExportFeatureValues",
request_serializer=featurestore_service.ExportFeatureValuesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_feature_values"]
@property
def search_features(
self,
) -> Callable[
[featurestore_service.SearchFeaturesRequest],
Awaitable[featurestore_service.SearchFeaturesResponse],
]:
r"""Return a callable for the search features method over gRPC.
Searches Features matching a query in a given
project.
Returns:
Callable[[~.SearchFeaturesRequest],
Awaitable[~.SearchFeaturesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_features" not in self._stubs:
self._stubs["search_features"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.FeaturestoreService/SearchFeatures",
request_serializer=featurestore_service.SearchFeaturesRequest.serialize,
response_deserializer=featurestore_service.SearchFeaturesResponse.deserialize,
)
return self._stubs["search_features"]
def close(self):
return self.grpc_channel.close()
__all__ = ("FeaturestoreServiceGrpcAsyncIOTransport",)
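# Illustrative sketch (an assumption about typical usage, not part of the generated
# surface): the async client in ``google.cloud.aiplatform_v1beta1`` builds this
# transport for you; the project and location below are placeholders.
async def _example_list_featurestores() -> None:
    from google.cloud import aiplatform_v1beta1

    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    pager = await client.list_featurestores(
        parent="projects/my-project/locations/us-central1"
    )
    async for fs in pager:
        print(fs.name)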
|
the-stack_0_11769 | ###############################################################################
#
# ChartScatter - A class for writing the Excel XLSX Scatter charts.
#
# Copyright 2013-2016, John McNamara, [email protected]
#
from warnings import warn
from . import chart
class ChartScatter(chart.Chart):
"""
A class for writing the Excel XLSX Scatter charts.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self, options=None):
"""
Constructor.
"""
super(ChartScatter, self).__init__()
if options is None:
options = {}
self.subtype = options.get('subtype')
if not self.subtype:
self.subtype = 'marker_only'
self.cross_between = 'midCat'
self.horiz_val_axis = 0
self.val_axis_position = 'b'
self.smooth_allowed = True
self.requires_category = True
# Set the available data label positions for this chart type.
self.label_position_default = 'right'
self.label_positions = {
'center': 'ctr',
'right': 'r',
'left': 'l',
'above': 't',
'below': 'b',
# For backward compatibility.
'top': 't',
'bottom': 'b'}
def combine(self, chart=None):
"""
Create a combination chart with a secondary chart.
Note: Override parent method to add a warning.
Args:
chart: The secondary chart to combine with the primary chart.
Returns:
Nothing.
"""
if chart is None:
return
warn('Combined chart not currently supported with scatter chart '
'as the primary chart')
###########################################################################
#
# Private API.
#
###########################################################################
def _write_chart_type(self, args):
# Override the virtual superclass method with a chart specific method.
# Write the c:scatterChart element.
self._write_scatter_chart(args)
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_scatter_chart(self, args):
# Write the <c:scatterChart> element.
if args['primary_axes']:
series = self._get_primary_axes_series()
else:
series = self._get_secondary_axes_series()
if not len(series):
return
style = 'lineMarker'
subtype = self.subtype
# Set the user defined chart subtype.
if subtype == 'marker_only':
style = 'lineMarker'
if subtype == 'straight_with_markers':
style = 'lineMarker'
if subtype == 'straight':
style = 'lineMarker'
if subtype == 'smooth_with_markers':
style = 'smoothMarker'
if subtype == 'smooth':
style = 'smoothMarker'
# Add default formatting to the series data.
self._modify_series_formatting()
self._xml_start_tag('c:scatterChart')
# Write the c:scatterStyle element.
self._write_scatter_style(style)
# Write the series elements.
for data in series:
self._write_ser(data)
# Write the c:marker element.
self._write_marker_value()
# Write the c:axId elements
self._write_axis_ids(args)
self._xml_end_tag('c:scatterChart')
def _write_ser(self, series):
# Over-ridden to write c:xVal/c:yVal instead of c:cat/c:val elements.
# Write the <c:ser> element.
index = self.series_index
self.series_index += 1
self._xml_start_tag('c:ser')
# Write the c:idx element.
self._write_idx(index)
# Write the c:order element.
self._write_order(index)
# Write the series name.
self._write_series_name(series)
# Write the c:spPr element.
self._write_sp_pr(series)
# Write the c:marker element.
self._write_marker(series.get('marker'))
# Write the c:dPt element.
self._write_d_pt(series.get('points'))
# Write the c:dLbls element.
self._write_d_lbls(series.get('labels'))
# Write the c:trendline element.
self._write_trendline(series.get('trendline'))
# Write the c:errBars element.
self._write_error_bars(series.get('error_bars'))
# Write the c:xVal element.
self._write_x_val(series)
# Write the c:yVal element.
self._write_y_val(series)
# Write the c:smooth element.
if 'smooth' in self.subtype and series['smooth'] is None:
# Default is on for smooth scatter charts.
self._write_c_smooth(True)
else:
self._write_c_smooth(series['smooth'])
self._xml_end_tag('c:ser')
def _write_plot_area(self):
# Over-ridden to have 2 valAx elements for scatter charts instead
# of catAx/valAx.
#
# Write the <c:plotArea> element.
self._xml_start_tag('c:plotArea')
# Write the c:layout element.
self._write_layout(self.plotarea.get('layout'), 'plot')
# Write the subclass chart elements for primary and secondary axes.
self._write_chart_type({'primary_axes': 1})
self._write_chart_type({'primary_axes': 0})
# Write c:catAx and c:valAx elements for series using primary axes.
self._write_cat_val_axis({'x_axis': self.x_axis,
'y_axis': self.y_axis,
'axis_ids': self.axis_ids,
'position': 'b',
})
tmp = self.horiz_val_axis
self.horiz_val_axis = 1
self._write_val_axis({'x_axis': self.x_axis,
'y_axis': self.y_axis,
'axis_ids': self.axis_ids,
'position': 'l',
})
self.horiz_val_axis = tmp
# Write c:valAx and c:catAx elements for series using secondary axes
self._write_cat_val_axis({'x_axis': self.x2_axis,
'y_axis': self.y2_axis,
'axis_ids': self.axis2_ids,
'position': 'b',
})
self.horiz_val_axis = 1
self._write_val_axis({'x_axis': self.x2_axis,
'y_axis': self.y2_axis,
'axis_ids': self.axis2_ids,
'position': 'l',
})
# Write the c:spPr element for the plotarea formatting.
self._write_sp_pr(self.plotarea)
self._xml_end_tag('c:plotArea')
def _write_x_val(self, series):
# Write the <c:xVal> element.
formula = series.get('categories')
data_id = series.get('cat_data_id')
data = self.formula_data[data_id]
self._xml_start_tag('c:xVal')
# Check the type of cached data.
data_type = self._get_data_type(data)
# TODO. Can a scatter plot have non-numeric data.
if data_type == 'str':
# Write the c:numRef element.
self._write_str_ref(formula, data, data_type)
else:
# Write the c:numRef element.
self._write_num_ref(formula, data, data_type)
self._xml_end_tag('c:xVal')
def _write_y_val(self, series):
# Write the <c:yVal> element.
formula = series.get('values')
data_id = series.get('val_data_id')
data = self.formula_data[data_id]
self._xml_start_tag('c:yVal')
# Unlike Cat axes data should only be numeric.
# Write the c:numRef element.
self._write_num_ref(formula, data, 'num')
self._xml_end_tag('c:yVal')
def _write_scatter_style(self, val):
# Write the <c:scatterStyle> element.
attributes = [('val', val)]
self._xml_empty_tag('c:scatterStyle', attributes)
def _modify_series_formatting(self):
# Add default formatting to the series data unless it has already been
# specified by the user.
subtype = self.subtype
# The default scatter style "markers only" requires a line type.
if subtype == 'marker_only':
# Go through each series and define default values.
for series in self.series:
# Set a line type unless there is already a user defined type.
if not series['line']['defined']:
series['line'] = {'width': 2.25,
'none': 1,
'defined': 1,
}
# Turn markers off for subtypes that don't have them.
if 'marker' not in subtype:
# Go through each series and define default values.
for series in self.series:
# Set a marker type unless there is a user defined type.
if not series.get('marker'):
series['marker'] = {'type': 'none', 'defined': 1}
def _write_d_pt_point(self, index, point):
# Write an individual <c:dPt> element. Override the parent method to
# add markers.
self._xml_start_tag('c:dPt')
# Write the c:idx element.
self._write_idx(index)
self._xml_start_tag('c:marker')
# Write the c:spPr element.
self._write_sp_pr(point)
self._xml_end_tag('c:marker')
self._xml_end_tag('c:dPt')
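# Illustrative sketch (public xlsxwriter API; the file name, sheet data and cell ranges
# are made up): how this chart type is normally created and populated.
def _example_scatter_chart():
    import xlsxwriter

    workbook = xlsxwriter.Workbook('scatter_example.xlsx')
    worksheet = workbook.add_worksheet()
    worksheet.write_column('A1', [1, 2, 3, 4, 5])
    worksheet.write_column('B1', [2, 4, 6, 8, 10])
    # 'subtype' maps onto self.subtype above; the default is 'marker_only'.
    chart = workbook.add_chart({'type': 'scatter', 'subtype': 'smooth_with_markers'})
    chart.add_series({
        'categories': '=Sheet1!$A$1:$A$5',  # written out as the c:xVal range
        'values': '=Sheet1!$B$1:$B$5',      # written out as the c:yVal range
    })
    worksheet.insert_chart('D2', chart)
    workbook.close()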
|
the-stack_0_11770 | #encoding: utf-8
import json
from django import template
from django.conf import settings
register = template.Library()
@register.inclusion_tag('laws/bill_full_name.html')
def bill_full_name(bill):
return { 'bill': bill }
@register.inclusion_tag('laws/bill_list_item.html')
def bill_list_item(bill, add_li=True, show_tags=True):
return { 'bill': bill, 'add_li': add_li, 'show_tags': show_tags }
@register.inclusion_tag('laws/item_tags.html')
def item_tags(tags):
return { 'tags': tags }
def split_member_vote_list_by_party(member_vote_list):
    ''' partition a list of member votes into per-party groups (used for both "for" and "against" voters) '''
list_by_party = []
if len(member_vote_list) > 0:
''' first party, first member '''
curr_party = { 'party' : member_vote_list[0].member.current_party.name,
'members' : []}
for vote in member_vote_list:
member = {'name' : vote.member.name,
'url' : vote.member.get_absolute_url(),
'img_url' : vote.member.img_url,
'id' : vote.member.id}
if vote.member.current_party.name == curr_party['party']:
curr_party['members'].append(member)
else:
list_by_party.append(curr_party)
curr_party = { 'party' : vote.member.current_party.name,
'members' : [member]}
''' last party '''
list_by_party.append(curr_party)
return list_by_party
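# For illustration only (shape, not real data): given votes by two members of party A
# and one member of party B, the returned structure looks like
#   [{'party': 'A', 'members': [{'name': ..., 'url': ..., 'img_url': ..., 'id': ...},
#                               {...}]},
#    {'party': 'B', 'members': [{...}]}]
# Note the input must already be ordered by party for this grouping to be correct.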
def create_vote_dict(vote):
for_vote_sorted = vote.for_votes()\
.order_by('member__current_party')\
.select_related('member','member__current_party')
for_vote_sorted = list(for_vote_sorted)
for_votes_grouped = split_member_vote_list_by_party(for_vote_sorted)
against_vote_sorted = vote.against_votes()\
.order_by('member__current_party')\
.select_related('member','member__current_party')
against_vote_sorted = list(against_vote_sorted)
against_votes_grouped = split_member_vote_list_by_party(against_vote_sorted)
vote_drill_data = dict({'against': dict({'count': len(against_vote_sorted),
'votes' : against_votes_grouped}),
'for': dict({ 'count' : len(for_vote_sorted),
'votes' : for_votes_grouped})})
vote_dict = dict({'vote' : vote,
'vote_drill_data' : json.dumps(vote_drill_data),
'vote_passed' : vote.for_votes_count > vote.against_votes_count,
'vote_time' : {'day' : vote.time.day,
'month' : vote.time.month,
'year' : vote.time.year}})
return vote_dict
def get_explanation(bill, proposals):
if hasattr(bill, 'knesset_proposal'):
if bill.knesset_proposal.get_explanation() != '':
return bill.knesset_proposal.get_explanation()
if hasattr(bill, 'gov_proposal'):
if bill.gov_proposal.get_explanation() != '':
return bill.gov_proposal.get_explanation()
for proposal in proposals:
if proposal.get_explanation() != '':
return proposal.get_explanation()
@register.inclusion_tag('laws/bill_inabox.html')
def bill_inabox(bill):
""" TODO: firstX and not first3"""
proposals = list(bill.proposals.all())
proposers = bill.proposers.all()
bill_inabox_dict = {
'bill': bill,
'billurl': 'http://oknesset.org%s' % bill.get_absolute_url(),
'proposers_first3': proposers[:3],
'proposers_count_minus3': len(proposers) - 3,
'explanation': get_explanation(bill, proposals),
}
#proposal
if proposals:
proposal = proposals[-1]
bill_inabox_dict['proposal'] = dict({'day' : proposal.date.day,
'month' : proposal.date.month,
'year' : proposal.date.year})
#pre vote
pre_votes = list(bill.pre_votes.all())
pre_vote = None
if pre_votes:
pre_vote = pre_votes[-1]
bill_inabox_dict['pre_vote'] = create_vote_dict(pre_vote)
#first_committee_meetings
cms = list(bill.first_committee_meetings.all())
if cms:
first_committee_meetings = cms[-1]
bill_inabox_dict['first_committee_meetings'] = dict({'day' : first_committee_meetings.date.day,
'month' : first_committee_meetings.date.month,
'year' : first_committee_meetings.date.year,
'url' : first_committee_meetings.get_absolute_url()})
#first vote
fv = bill.first_vote
if fv:
bill_inabox_dict['first_vote'] = create_vote_dict(fv)
#second_committee_meetings
cms = list(bill.second_committee_meetings.all())
if cms:
second_committee_meetings = cms[-1]
bill_inabox_dict['second_committee_meetings'] = dict({'day' : second_committee_meetings.date.day,
'month' : second_committee_meetings.date.month,
'year' : second_committee_meetings.date.year,
'url' : second_committee_meetings.get_absolute_url()})
#second+third vote (approval_vote)
av = bill.approval_vote
if av:
bill_inabox_dict['approval_vote'] = create_vote_dict(av)
return bill_inabox_dict
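# Illustrative template usage (the library name in the {% load %} tag is an assumption;
# it depends on this file's name under templatetags/):
#   {% load laws_extras %}
#   {% bill_full_name bill %}
#   {% bill_list_item bill %}
#   {% bill_inabox bill %}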
|
the-stack_0_11774 | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Pytest integration."""
import os
from pipelines import config
from pipelines import nox
RUN_FLAGS = [
"-c",
config.PYPROJECT_TOML,
"--showlocals",
]
COVERAGE_FLAGS = [
"--cov",
config.MAIN_PACKAGE,
"--cov-config",
config.PYPROJECT_TOML,
"--cov-report",
"term",
"--cov-report",
f"html:{config.COVERAGE_HTML_PATH}",
"--cov-report",
"xml",
]
@nox.session(reuse_venv=True)
def pytest(session: nox.Session) -> None:
"""Run unit tests and measure code coverage.
Coverage can be disabled with the `--skip-coverage` flag.
"""
session.install("-r", "requirements.txt", "-r", "dev-requirements.txt")
_pytest(session)
@nox.session(reuse_venv=True)
def pytest_all_features(session: nox.Session) -> None:
"""Run unit tests and measure code coverage, using speedup modules.
Coverage can be disabled with the `--skip-coverage` flag.
"""
session.install(
"-r",
"requirements.txt",
"-r",
"server-requirements.txt",
"-r",
"speedup-requirements.txt",
"-r",
"dev-requirements.txt",
)
_pytest(session, "-OO")
def _pytest(session: nox.Session, *py_flags: str) -> None:
if "--skip-coverage" in session.posargs:
session.posargs.remove("--skip-coverage")
flags = RUN_FLAGS
else:
flags = [*RUN_FLAGS, *COVERAGE_FLAGS]
session.run("python", *py_flags, "-m", "pytest", *flags, *session.posargs, config.TEST_PACKAGE)
|
the-stack_0_11778 | import copy
import argparse
import json
import pickle
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import egg.core as core
from egg.core.util import find_lengths
from egg.core import EarlyStopperAccuracy
from egg.core import CheckpointSaver
from egg.zoo.imitation_learning.archs import (
PlusOneWrapper,
Receiver,
Sender,
)
from egg.zoo.compo_vs_generalization import train as compo_vs_generalization
from egg.zoo.compo_vs_generalization.data import (
ScaledDataset,
enumerate_attribute_value,
one_hotify,
select_subset_V1,
select_subset_V2,
split_holdout,
split_train_test,
)
from egg.zoo.imitation_learning.loader import *
from egg.zoo.imitation_learning.util import *
def eval_expert(metadata_path: str):
checkpoint_wrapper = load_metadata_from_pkl(metadata_path)
params = checkpoint_wrapper['params']
params.append('--load_from_checkpoint={}'.format(checkpoint_wrapper['checkpoint_path']))
compo_vs_generalization.main(params, train_mode=False)
def eval_bc_prediction(new_sender, new_receiver, trainer, t=None, checkpoint_path=None):
_, interaction = trainer.eval()
r_loss, r_acc, _ = new_receiver.score(interaction, val=True)
s_loss, s_acc, _ = new_sender.score(interaction, val=True)
print('Epoch: {}; Receiver val loss: {}; Sender val loss: {}'.format(t, r_loss, s_loss))
return r_loss, s_loss, r_acc, s_acc
def eval_expert_original_task(trainer):
# print('About to evaluate og agents on og task')
mean_loss, interaction = trainer.eval()
acc_or, acc = interaction.aux['acc_or'].mean(), interaction.aux['acc'].mean()
print('Expert Loss: {}. Acc_or: {}. Acc: {}'.format(mean_loss, acc_or, acc))
# input()
return mean_loss, acc, acc_or
def eval_bc_original_task(new_trainer, t=None, checkpoint_path=None):
mean_loss, interaction = new_trainer.eval()
acc_or, acc = interaction.aux['acc_or'].mean(), interaction.aux['acc'].mean()
print('Epoch: {}; Original Task Loss: {}. Acc_or: {}. Acc: {}'.format(t, mean_loss, acc_or, acc))
return mean_loss, acc, acc_or # new results
def train_bc(bc_args, new_sender, new_receiver, optimizer_s, optimizer_r, trainer,
new_trainer=None, imitation=False, perf_log=None, sender_aware_weight=0.0):
new_receiver_converged = False
new_sender_converged = False
receiver_converged_epoch, sender_converged_epoch = 0, 0
cumu_r_loss, cumu_s_loss = torch.zeros(bc_args.n_epochs_bc), torch.zeros(bc_args.n_epochs_bc)
cumu_r_acc, cumu_s_acc = torch.empty(bc_args.n_epochs_bc), torch.empty(bc_args.n_epochs_bc)
reinforce_loss_for_sender = torch.zeros(bc_args.n_epochs_bc)
for t in range(bc_args.n_epochs_bc):
val = t % bc_args.val_interval == 0
if val:
new_sender.eval()
new_receiver.eval()
r_loss, s_loss, r_acc, s_acc = eval_bc_prediction(new_sender, new_receiver, trainer, t)
            if new_trainer is not None:
                mean_loss, acc, acc_or = eval_bc_original_task(new_trainer, t)
            # only log the original-task numbers when they were actually computed
            if perf_log is not None and new_trainer is not None:
                log_performance(perf_log, r_loss.item(),
                                s_loss.item(), r_acc.item(), s_acc.item(), mean_loss,
                                acc.item(), acc_or.item(), sender_converged_epoch, receiver_converged_epoch)
_, interaction = trainer.eval(trainer.train_data)
trainer.game.train()
if not new_receiver_converged:
new_receiver.train()
r_loss, r_acc, aux_info = train_epoch(
optimizer_r,
new_receiver,
interaction,
expert=trainer.game.receiver,
imitation=imitation,
                aux_info={'expert_sender': trainer.game.sender if sender_aware_weight > 0 else None,
'sender_aware': sender_aware_weight > 0}
)
reinforce_loss_for_sender[t] = aux_info['reinforce_loss']
cumu_r_loss[t] = r_loss
cumu_r_acc[t] = r_acc
new_receiver_converged = get_grad_norm(new_receiver) < bc_args.convergence_epsilon
receiver_converged_epoch = t
if not new_sender_converged:
new_sender.train()
s_loss, s_acc, _ = train_epoch(
optimizer_s,
new_sender,
interaction,
expert=trainer.game.sender,
imitation=imitation
)
cumu_s_loss[t] = s_loss
cumu_s_acc[t] = s_acc
new_sender_converged = get_grad_norm(new_sender) < bc_args.convergence_epsilon
sender_converged_epoch = t
if new_receiver_converged and new_sender_converged:
print('Both receiver and sender gradients < epsilon={}'.format(bc_args.convergence_epsilon))
break
print('Epoch: {}; Receiver loss: {}; Sender loss: {}; R acc: {}; S acc: {}'.format(t, r_loss, s_loss, r_acc, s_acc))
cumu_s_loss += sender_aware_weight * reinforce_loss_for_sender
cumu_s_loss = cumu_s_loss.sum()
cumu_r_loss = cumu_r_loss.sum()
return cumu_s_loss, cumu_r_loss, t, s_acc, r_acc, cumu_s_acc, cumu_r_acc
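# Hedged sketch: get_grad_norm is imported from egg.zoo.imitation_learning.util above.
# A common definition is the global L2 norm of the parameter gradients, e.g. the
# following (an assumption, not necessarily the project's implementation):
def _global_grad_norm_sketch(model):
    total = 0.0
    for p in model.parameters():
        if p.grad is not None:
            total += p.grad.detach().norm(2).item() ** 2
    return total ** 0.5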
def train_epoch(optimizer, agent, interaction, expert=None, imitation=False, aux_info={}):
optimizer.zero_grad()
loss, acc, aux = agent.score(interaction, expert=expert, imitation=imitation, aux_info=aux_info)
loss.backward()
optimizer.step()
return loss, acc, aux
def main(metadata_path: str, bc_params, expert_seed):
bc_args = get_bc_params(bc_params)
checkpoint_wrapper = load_metadata_from_pkl(metadata_path)
params = checkpoint_wrapper['params']
params.append('--load_from_checkpoint={}'.format(checkpoint_wrapper['checkpoint_path']))
params = list(filter(lambda x: 'random_seed' not in x, params))
params.append('--random_seed={}'.format(bc_args.bc_random_seed))
opts = get_params(params)
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
# New agents
new_sender, new_receiver = bc_agents_setup(opts, device, *define_agents(opts))
optimizer_r = torch.optim.Adam(new_receiver.parameters(), lr=opts.lr)
optimizer_s = torch.optim.Adam(new_sender.parameters(), lr=opts.lr)
# Dataloader
trainer = expert_setup(opts)
new_trainer = copy.deepcopy(trainer)
new_trainer.game.sender, new_trainer.game.receiver = new_sender.agent, new_receiver.agent
# Logging
perf_log = {
'r_loss': [],
's_loss': [],
'r_acc': [],
's_acc': [],
'mean_loss': [],
'acc': [],
'acc_or': [],
'epoch': [],
'epoch_speaker': [],
'epoch_receiver': []
}
    cumu_s_loss, cumu_r_loss, t, s_acc, r_acc, cumu_s_acc, cumu_r_acc = train_bc(
        bc_args, new_sender, new_receiver, optimizer_s, optimizer_r, trainer,
        new_trainer=new_trainer, perf_log=perf_log)
# Last validation score
print('==============================================')
print('Last validation score')
r_loss, s_loss, r_acc, s_acc = eval_bc_prediction(new_sender, new_receiver, trainer, t=t)
# Integrate with og environment on validation
print('Last validation score on original task')
mean_loss, acc, acc_or = eval_bc_original_task(new_trainer, t=t)
# Original model score
print('Expert validation on original task')
eval_expert_original_task(trainer)
    # train_bc does not return the per-agent convergence epochs, so the last BC epoch t
    # is logged for both fields.
    log_performance(perf_log, r_loss.item(), s_loss.item(), r_acc.item(), s_acc.item(), mean_loss,
                    acc.item(), acc_or.item(), t, t)
# Save BC model
if bc_args.save_bc:
save_behavioral_clones(bc_args, params, new_receiver, new_sender,
optimizer_r, optimizer_s, metadata_path, perf_log, expert_seed)
core.close()
if __name__=='__main__':
import sys
import random
# for i in range(100):
# try:
# resave_compo_metrics_on_whole_dataset('saved_models/' +
# 'n_val_10_n_att_2_vocab_100_max_len_3_hidden_500/' +
# 'checkpoint_wrapper_randomseed{}.pkl'.format(i))
# except:
# continue
# # run program for all the things
for seed in range(101, 131):
print('Random seed: ', seed)
random.seed(seed)
params = sys.argv[1:].copy()
params.append('--bc_random_seed={}'.format(seed))
for i in range(100):
try:
main('saved_models/' +
'n_val_10_n_att_2_vocab_100_max_len_3_hidden_500/' +
'checkpoint_wrapper_randomseed{}.pkl'.format(i), params, i)
            except FileNotFoundError:
continue
|
the-stack_0_11780 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Pig(Package):
"""
Pig is a dataflow programming environment for processing very large files.
Pig's language is called Pig Latin. A Pig Latin program consists of a
directed acyclic graph where each node represents an operation that
transforms data.
"""
homepage = "http://archive.apache.org"
url = "http://archive.apache.org/dist/hadoop/pig/stable/pig-0.7.0.tar.gz"
version('0.7.0', sha256='fa7211fb339f547f679a3dd90055f1ddc45d5754d88463e4cc39c380ddf8b02a')
def install(self, spec, prefix):
install_tree('.', prefix)
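# Illustrative usage from the command line (standard Spack CLI; the version pin matches
# the single version declared above):
#   spack install pig@0.7.0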
|
the-stack_0_11782 | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2017 The Peps developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
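# For illustration: repr-based encoders keep all significant digits of
# 20000000.00000003 and the round trip above yields 2000000000000003 satoshis; an
# encoder that rounds floats to fewer digits would trip the RuntimeError.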
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "peps.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
pepsd and peps-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run pepsd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "pepsd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "peps-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in peps.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a pepsd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "pepsd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "peps-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple pepsds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
    then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
the-stack_0_11784 | # # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:[email protected])
import enum
import logging
import typing
from decimal import Decimal
from ..account import Account, AccountSlot
from ..accountinstrumentvalues import AccountInstrumentValues, PricedAccountInstrumentValues
from ..cache import Cache, MarketCache
from ..context import Context
from ..group import GroupSlotSpotMarket, GroupSlotPerpMarket, GroupSlot, Group
from ..instrumentvalue import InstrumentValue
from ..lotsizeconverter import NullLotSizeConverter
from ..openorders import OpenOrders
from ..perpaccount import PerpAccount
from ..token import Instrument
# # 🥭 HealthType enum
#
# Is the health calculation Initial or Maintenance?
#
class HealthType(enum.Enum):
# We use strings here so that argparse can work with these as parameters.
INITIAL = "INITIAL"
MAINTENANCE = "MAINTENANCE"
def __str__(self) -> str:
return self.value
def __repr__(self) -> str:
return f"{self}"
class HealthCalculator:
def __init__(self, context: Context, health_type: HealthType) -> None:
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.context: Context = context
self.health_type: HealthType = health_type
def _calculate_pessimistic_spot_value(self, values: PricedAccountInstrumentValues) -> typing.Tuple[InstrumentValue, InstrumentValue]:
# base total if all bids were executed
if_all_bids_executed: InstrumentValue = values.quote_token_locked + values.base_token_total
# base total if all asks were executed
if_all_asks_executed: InstrumentValue = values.base_token_free
base: InstrumentValue
quote: InstrumentValue
if if_all_bids_executed > if_all_asks_executed:
base = values.net_value + if_all_bids_executed
quote = values.quote_token_free
return base, quote
else:
base = values.net_value + if_all_asks_executed
quote = values.base_token_locked + values.quote_token_total
return base, quote
def _calculate_pessimistic_perp_value(self, values: PricedAccountInstrumentValues) -> typing.Tuple[InstrumentValue, InstrumentValue]:
return values.perp_base_position, values.perp_quote_position
def _calculate_perp_value(self, basket_token: AccountSlot, token_price: InstrumentValue, market_index: int, cache: Cache, unadjustment_factor: Decimal) -> typing.Tuple[Decimal, Decimal]:
if basket_token.perp_account is None or basket_token.perp_account.empty:
return Decimal(0), Decimal(0)
perp_market_cache = cache.perp_market_cache[market_index]
if perp_market_cache is None:
raise Exception(f"Cache contains no perp market cache for market index {market_index}.")
perp_account: PerpAccount = basket_token.perp_account
token: Instrument = basket_token.base_instrument
base_lot_size: Decimal = perp_account.lot_size_converter.base_lot_size
quote_lot_size: Decimal = perp_account.lot_size_converter.quote_lot_size
takerQuote: Decimal = perp_account.taker_quote * quote_lot_size
base_position: Decimal = (perp_account.base_position + perp_account.taker_base) * base_lot_size
bids_quantity: Decimal = perp_account.bids_quantity * base_lot_size
asks_quantity: Decimal = perp_account.asks_quantity * base_lot_size
if_all_bids_executed = token.shift_to_decimals(base_position + bids_quantity) * unadjustment_factor
if_all_asks_executed = token.shift_to_decimals(base_position - asks_quantity) * unadjustment_factor
if abs(if_all_bids_executed) > abs(if_all_asks_executed):
quote_position = perp_account.quote_position - perp_account.unsettled_funding(perp_market_cache)
full_quote_position = quote_position + takerQuote - (bids_quantity * token_price.value)
return if_all_bids_executed, full_quote_position
else:
quote_position = perp_account.quote_position - perp_account.unsettled_funding(perp_market_cache)
full_quote_position = quote_position + takerQuote + (asks_quantity * token_price.value)
return if_all_asks_executed, full_quote_position
def calculate(self, account: Account, open_orders_by_address: typing.Dict[str, OpenOrders], group: Group, cache: Cache) -> Decimal:
priced_reports: typing.List[PricedAccountInstrumentValues] = []
for asset in account.base_slots:
# if (asset.deposit.value != 0) or (asset.borrow.value != 0) or (asset.net_value.value != 0):
report: AccountInstrumentValues = AccountInstrumentValues.from_account_basket_base_token(
asset, open_orders_by_address, group)
# print("report", report)
# price: InstrumentValue = group.token_price_from_cache(cache, report.base_token)
market_cache: MarketCache = group.market_cache_from_cache(cache, report.base_token)
# print("Market cache", market_cache)
priced_report: PricedAccountInstrumentValues = report.priced(market_cache)
# print("priced_report", priced_report)
priced_reports += [priced_report]
quote_token_free_in_open_orders: InstrumentValue = InstrumentValue(group.shared_quote_token, Decimal(0))
quote_token_total_in_open_orders: InstrumentValue = InstrumentValue(group.shared_quote_token, Decimal(0))
for priced_report in priced_reports:
quote_token_free_in_open_orders += priced_report.quote_token_free
quote_token_total_in_open_orders += priced_report.quote_token_total
# print("quote_token_free_in_open_orders", quote_token_free_in_open_orders)
# print("quote_token_total_in_open_orders", quote_token_total_in_open_orders)
quote_report: AccountInstrumentValues = AccountInstrumentValues(account.shared_quote_token,
account.shared_quote_token,
account.shared_quote.raw_deposit,
account.shared_quote.deposit,
account.shared_quote.raw_borrow,
account.shared_quote.borrow,
InstrumentValue(
group.shared_quote_token, Decimal(0)),
InstrumentValue(
group.shared_quote_token, Decimal(0)),
quote_token_free_in_open_orders,
quote_token_total_in_open_orders,
InstrumentValue(
group.shared_quote_token, Decimal(0)),
Decimal(0), Decimal(0),
InstrumentValue(
group.shared_quote_token, Decimal(0)),
InstrumentValue(
group.shared_quote_token, Decimal(0)),
Decimal(0), Decimal(0),
NullLotSizeConverter())
# print("quote_report", quote_report)
health: Decimal = quote_report.net_value.value
# print("Health (start)", health)
for priced_report in priced_reports:
slot: GroupSlot = group.slot_by_instrument(priced_report.base_token)
spot_market: typing.Optional[GroupSlotSpotMarket] = slot.spot_market
if spot_market is None:
raise Exception(f"Could not find market for spot token {priced_report.base_token.symbol}.")
base_value, quote_value = self._calculate_pessimistic_spot_value(priced_report)
spot_weight = spot_market.init_asset_weight if base_value > 0 else spot_market.init_liab_weight
spot_health = base_value.value * spot_weight
# print("Weights", base_value.value, "*", spot_weight, spot_health)
perp_base, perp_quote = priced_report.if_worst_execution()
perp_market: typing.Optional[GroupSlotPerpMarket] = slot.perp_market
perp_health: Decimal = Decimal(0)
if perp_market is not None:
perp_weight = perp_market.init_asset_weight if perp_base > 0 else perp_market.init_liab_weight
perp_health = perp_base.value * perp_weight
health += spot_health
health += perp_health
health += quote_value.value
health += perp_quote.value
health += priced_report.raw_perp_quote_position
# print("Health (now)", health, spot_health, perp_health, quote_value.value,
# perp_quote.value, priced_report.raw_perp_quote_position)
# print("Health (returning)", health)
return health
def __str__(self) -> str:
return f"« 𝙷𝚎𝚊𝚕𝚝𝚑𝙲𝚊𝚕𝚌𝚞𝚕𝚊𝚝𝚘𝚛 [{self.health_type}] »"
def __repr__(self) -> str:
return f"{self}"
|
the-stack_0_11785 | n = 10
m = 4
stack = []
def main():
while True:
if is_full_solution():
is_acceptable()
if has_next_solution():
try_next_solution()
else:
backtrack()
continue
if can_expand_solution():
expand_solution()
continue
break
def is_full_solution():
return len(stack) == m
def is_acceptable():
if len(stack) == m and stack[len(stack) - 1] < stack[len(stack) - 2]:
print(stack)
def can_expand_solution():
if len(stack) < m:
return True
def expand_solution():
stack.append(m - len(stack))
def has_next_solution():
return stack[len(stack) - 1] + 1 < stack[len(stack) - 2]
def try_next_solution():
stack[len(stack) - 1] += 1
def backtrack():
global stack
cursor = len(stack) - 1
while stack[cursor] - stack[cursor - 1] == -1 and cursor - 1 >= 0:
cursor -= 1
stack = stack[:cursor+1]
# increase one
stack[-1] += 1
if stack[0] > n:
raise
main()
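# Note (added): the stack machine above appears to enumerate every strictly
# decreasing m-tuple with values in 1..n (i.e. every m-element subset of
# {1..n}, printed in decreasing order) and finally terminates by raising once
# the leading element exceeds n. Assuming that reading, the same set of
# tuples can be cross-checked with itertools:
#
#   from itertools import combinations
#   expected = set(combinations(range(n, 0, -1), m))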
|
the-stack_0_11787 | import os
import warnings
from collections import OrderedDict
from itertools import product
from typing import Any, Dict, List, Optional, Union
import torch
from torch.nn.functional import interpolate
from torch.nn.modules import LSTM
from torch.nn.modules.conv import Conv2d
from torch.nn.modules.linear import Linear
import delve
from delve.logger import log
from delve.metrics import *
from delve.torch_utils import TorchCovarianceMatrix
from delve.writers import STATMAP, WRITERS, CompositWriter, NPYWriter
class CheckLayerSat(object):
"""Takes PyTorch module and records layer saturation,
intrinsic dimensionality and other scalars.
Args:
savefile (str) : destination for summaries
save_to (str, List[Union[str, delve.writers.AbstractWriter]]:
Specify one or multiple save strategies.
You can use preimplemented save strategies or inherit from
the AbstractWriter in order to implement your
own preferred saving strategy.
pre-existing saving strategies are:
csv : stores all stats in a csv-file with one
row for each epoch.
plot : produces plots from intrinsic dimensionality
and / or layer saturation
tensorboard : saves all stats to tensorboard
print : print all metrics on console
as soon as they are logged
npy : creates a folder-structure with npy-files
containing the logged values. This is the only
save strategy that can save the
full covariance matrix.
This strategy is useful if you want to reproduce
intrinsic dimensionality and saturation values
with other thresholds without re-evaluating
model checkpoints.
modules (torch modules or list of modules) : layer-containing object.
Per default, only Conv2D,
Linear and LSTM-Cells
are recorded
writers_args (dict) : contains additional arguments passed over to the
writers. This is only used, when a writer is
initialized through a string-key.
log_interval (int) : distances between two batches used for updating the
covariance matrix. Default value is 1, which means
that all data is used for computing
intrinsic dimensionality and saturation.
Increasing the log interval is usefull on very
large datasets to reduce numeric instability.
max_samples (int) : (optional) the covariance matrix in each layer
will halt updating itself when max_samples
                            are reached. Use case is similar to log-interval,
when datasets are very large.
stats (list of str): list of stats to compute
supported stats are:
idim : intrinsic dimensionality
lsat : layer saturation (intrinsic dimensionality divided by feature space dimensionality)
cov : the covariance-matrix (only saveable using the 'npy' save strategy)
det : the determinant of the covariance matrix (also known as generalized variance)
                trc : the trace of the covariance matrix, generally a more useful metric
                      than det for determining the total variance of the data.
                      However note that this does not take the correlation between
                      features into account. On the other hand, in most cases the determinant will be zero, since
there will be very strongly correlated features, so trace might be the better option.
                dtrc : the trace of the diagonal matrix, another way of measuring the dispersion of the data.
lsat : layer saturation (intrinsic dimensionality
divided by feature space dimensionality)
embed : samples embedded in the eigenspace of dimension 2
layerwise_sat (bool): whether or not to include
layerwise saturation when saving
reset_covariance (bool): True by default, resets the covariance
every time the stats are computed. Disabling
this option will strongly bias covariance
since the gradient will influence the model.
We recommend computing saturation at the
end of training and testing.
include_conv : setting to False includes only linear layers
conv_method (str) : how to subsample convolutional layers. Default is
channelwise, which means that the each position of
the filter tensor is considered a datapoint,
                            effectively yielding a data matrix of shape
(height*width*batch_size, num_filters)
supported methods are:
channelwise : treats every depth vector of the tensor as a
datapoint, effectivly reshaping the data tensor
from shape (batch_size, height, width, channel)
into (batch_size*height*width, channel).
mean : applies global average pooling on
each feature map
max : applies global max pooling on
each feature map
median : applies global median pooling on
each feature map
                                flatten : flattens the entire feature map to a vector,
reshaping the data tensor into a data matrix
of shape (batch_size, height*width*channel).
This strategy for dealing with convolutions is
                                extremely memory intensive and will likely cause
memory and performance problems for any
non toy-problem
timeseries_method (str) : how to subsample timeseries methods. Default
is last_timestep.
supported methods are:
timestepwise : stacks each sample timestep-by-timestep
last_timestep : selects the last timestep's output
nosave (bool) : If True, disables saving artifacts (images), default is False
verbose (bool) : print saturation for every layer during training
sat_threshold (float): threshold used to determine the number of
eigendirections belonging to the latent space.
In effect, this is the threshold determining
                               the intrinsic dimensionality. Default value
is 0.99 (99% of the explained variance), which
is a compromise between a good and interpretable
approximation. From experience the threshold
should be between 0.97 and 0.9995 for
                               meaningful results.
verbose (bool) : Change verbosity level (default is 0)
device (str) : Device to do the computations on.
Default is cuda:0. Generally it is recommended
to do the computations
on the gpu in order to get maximum performance.
Using the cpu is generally slower but it lets
delve use regular RAM instead of the generally
more limited VRAM of the GPU.
Not having delve run on the same device as the
network causes slight performance decrease due
to copying memory between devices during each
forward pass.
Delve can handle models distributed on multiple
GPUs, however delve itself will always
run on a single device.
initial_epoch (int) : The initial epoch to start with. Default is 0,
which corresponds to a new run.
If initial_epoch != 0 the writers will
look for save states that they can resume.
If set to zero, all existing states
will be overwritten. If set to a lower epoch
than actually recorded the behavior of the
writers is undefined and may result in crashes,
loss of data or corrupted data.
interpolation_strategy (str) : Default is None (disabled). If set to a
string key accepted by the
model-argument of
torch.nn.functional.interpolate, the
feature map will be resized to match the
interpolated size. This is useful if
you work with large resolutions and want
to save up on computation time.
                                       No interpolation is done if the resolution is smaller.
interpolation_downsampling (int): Default is 32. The target resolution
if downsampling is enabled.
"""
def __init__(self,
savefile: str,
save_to: Union[str, delve.writers.AbstractWriter],
modules: torch.nn.Module,
writer_args: Optional[Dict[str, Any]] = None,
log_interval=1,
max_samples=None,
stats: list = ['lsat'],
layerwise_sat: bool = True,
reset_covariance: bool = True,
average_sat: bool = False,
ignore_layer_names: List[str] = [],
include_conv: bool = True,
conv_method: str = 'channelwise',
timeseries_method: str = 'last_timestep',
sat_threshold: str = .99,
nosave=False,
verbose: bool = False,
device='cuda:0',
initial_epoch: int = 0,
interpolation_strategy: Optional[str] = None,
interpolation_downsampling: int = 32):
self.nosave = nosave
self.verbose = verbose
# self.disable_compute: bool = False
self.include_conv = include_conv
self.conv_method = conv_method
self.timeseries_method = timeseries_method
self.threshold = sat_threshold
self.layers = self.get_layers_recursive(modules)
self.max_samples = max_samples
self.log_interval = log_interval
self.reset_covariance = reset_covariance
self.initial_epoch = initial_epoch
self.interpolation_strategy = interpolation_strategy
self.interpolation_downsampling = interpolation_downsampling
writer_args = writer_args or {}
writer_args['savepath'] = savefile
os.makedirs(savefile, exist_ok=True)
self.writer = self._get_writer(save_to, writer_args)
self.interval = log_interval
self._warn_if_covariance_not_saveable(stats)
self.logs, self.stats = self._check_stats(stats)
self.layerwise_sat = layerwise_sat
self.average_sat = average_sat
self.ignore_layer_names = ignore_layer_names
self.seen_samples = {'train': {}, 'eval': {}}
self.global_steps = 0
self.global_hooks_registered = False
self.is_notebook = None
self.device = device
self.record = True
for name, layer in self.layers.items():
if isinstance(layer, Conv2d) or isinstance(layer, Linear) \
or isinstance(layer, LSTM):
self._register_hooks(layer=layer,
layer_name=name,
interval=log_interval)
if self.initial_epoch != 0:
self.writer.resume_from_saved_state(self.initial_epoch)
def _warn_if_covariance_not_saveable(self, stats: List[str]):
warn = False
if 'cov' in stats:
if isinstance(self.writer, CompositWriter):
for writer in self.writer.writers:
if isinstance(writer, NPYWriter):
return
warn = True
elif not isinstance(self.writer, NPYWriter):
warn = True
if warn:
warnings.warn("'cov' was selected as stat, but 'npy' (NPYWriter)"
"is not used as a save strategy, which is the only"
"writer able to save the covariance matrix. The"
"training and logging will run normally, but the"
"covariance matrix will not be saved. Note that you"
"can add multiple writers by passing a list.")
def __getattr__(self, name):
if name.startswith('add_') and name != 'add_saturations':
if not self.nosave:
return getattr(self.writer, name)
else:
def noop(*args, **kwargs):
log.info(
f'Logging disabled, not logging: {args}, {kwargs}')
pass
return noop
else:
try:
# Redirect to writer object
return self.writer.__getattribute__(name)
except Exception:
# Default behaviour
return self.__getattribute__(name)
def __repr__(self):
return self.layers.keys().__repr__()
def is_recording(self) -> bool:
return self.record
def stop(self):
self.record = False
def resume(self):
self.record = True
def close(self):
"""User endpoint to close writer and progress bars."""
return self.writer.close()
def _format_saturation(self, saturation_status):
raise NotImplementedError
def _check_stats(self, stats: list):
if not isinstance(stats, list):
stats = list(stats)
supported_stats = [
'lsat',
'idim',
'cov',
'det',
'trc',
'dtrc',
'embed',
]
compatible = [
stat in supported_stats
if "_" not in stat else stat.split("_")[0] in stats
for stat in stats
]
incompatible = [i for i, x in enumerate(compatible) if not x]
assert all(compatible), "Stat {} is not supported".format(
stats[incompatible[0]])
name_mapper = STATMAP
logs = {
f'{mode}-{name_mapper[stat]}': OrderedDict()
for mode, stat in product(['train', 'eval'], ['cov'])
}
return logs, stats
def _add_conv_layer(self, layer: torch.nn.Module):
layer.out_features = layer.out_channels
layer.conv_method = self.conv_method
def _add_lstm_layer(self, layer: torch.nn.Module):
layer.out_features = layer.hidden_size
layer.timeseries_method = self.timeseries_method
def get_layer_from_submodule(self,
submodule: torch.nn.Module,
layers: dict,
name_prefix: str = ''):
if len(submodule._modules) > 0:
for idx, (name, subsubmodule) in \
enumerate(submodule._modules.items()):
new_prefix = name if name_prefix == '' else name_prefix + \
'-' + name
self.get_layer_from_submodule(subsubmodule, layers, new_prefix)
return layers
else:
layer_name = name_prefix
layer_type = layer_name
if not self._check_is_supported_layer(submodule):
log.info(f"Skipping {layer_type}")
return layers
if isinstance(submodule, Conv2d) and self.include_conv:
self._add_conv_layer(submodule)
layers[layer_name] = submodule
log.info('added layer {}'.format(layer_name))
return layers
def _check_is_supported_layer(self, layer: torch.nn.Module) -> bool:
return isinstance(layer, Conv2d) or isinstance(
layer, Linear) or isinstance(layer, LSTM)
def get_layers_recursive(self, modules: Union[list, torch.nn.Module]):
layers = {}
if not isinstance(modules, list) and not hasattr(
modules, 'out_features'):
# submodules = modules._modules # OrderedDict
layers = self.get_layer_from_submodule(modules, layers, '')
elif self._check_is_supported_layer(modules):
for module in modules:
layers = self.get_layer_from_submodule(module, layers,
type(module))
else:
for i, module in enumerate(modules):
layers = self.get_layer_from_submodule(
module, layers,
'' if not self._check_is_supported_layer(module) else
f'Module-{i}-{type(module).__name__}')
return layers
def _get_writer(self, save_to, writers_args) -> \
delve.writers.AbstractWriter:
"""Create a writer to log history to `writer_dir`."""
if issubclass(type(save_to), delve.writers.AbstractWriter):
return save_to
if isinstance(save_to, list):
all_writers = []
for saver in save_to:
all_writers.append(
self._get_writer(save_to=saver, writers_args=writers_args))
return CompositWriter(all_writers)
if save_to in WRITERS:
writer = WRITERS[save_to](**writers_args)
else:
raise ValueError(
'Illegal argument for save_to "{}"'.format(save_to))
return writer
def _register_hooks(self, layer: torch.nn.Module, layer_name: str,
interval):
layer.eval_layer_history = getattr(layer, 'eval_layer_history', list())
layer.train_layer_history = getattr(layer, 'train_layer_history',
list())
layer.layer_svd = getattr(layer, 'layer_svd', None)
layer.forward_iter = getattr(layer, 'forward_iter', 0)
layer.interval = getattr(layer, 'interval', interval)
layer.writer = getattr(layer, 'writer', self.writer)
layer.name = getattr(layer, 'name', layer_name)
self.register_forward_hooks(layer, self.stats)
return self
def _record_stat(self, activations_batch: torch.Tensor, lstm_ae: bool,
layer: torch.nn.Module, training_state: str, stat: str):
if activations_batch.dim() == 4: # conv layer (B x C x H x W)
if self.interpolation_strategy is not None and (
activations_batch.shape[3] >
self.interpolation_downsampling
or activations_batch.shape[2] >
self.interpolation_downsampling):
activations_batch = interpolate(
activations_batch,
size=self.interpolation_downsampling,
mode=self.interpolation_strategy)
if self.conv_method == 'median':
shape = activations_batch.shape
reshaped_batch = activations_batch.reshape(
shape[0], shape[1], shape[2] * shape[3])
activations_batch, _ = torch.median(reshaped_batch,
dim=2) # channel median
elif self.conv_method == 'max':
shape = activations_batch.shape
reshaped_batch = activations_batch.reshape(
shape[0], shape[1], shape[2] * shape[3])
activations_batch, _ = torch.max(reshaped_batch,
dim=2) # channel median
elif self.conv_method == 'mean':
activations_batch = torch.mean(activations_batch, dim=(2, 3))
elif self.conv_method == 'flatten':
activations_batch = activations_batch.view(
activations_batch.size(0), -1)
elif self.conv_method == 'channelwise':
reshaped_batch: torch.Tensor = activations_batch.permute(
[1, 0, 2, 3])
shape = reshaped_batch.shape
reshaped_batch: torch.Tensor = reshaped_batch.flatten(1)
reshaped_batch: torch.Tensor = reshaped_batch.permute([1, 0])
activations_batch = reshaped_batch
elif activations_batch.dim() == 3: # LSTM layer (B x T x U)
if self.timeseries_method == 'timestepwise':
activations_batch = activations_batch.flatten(1)
elif self.timeseries_method == 'last_timestep':
activations_batch = activations_batch[:, -1, :]
if layer.name not in self.logs[f'{training_state}-{stat}'] or (
not isinstance(self.logs[f'{training_state}-{stat}'],
TorchCovarianceMatrix) and self.record):
save_data = 'embed' in self.stats
self.logs[f'{training_state}-{stat}'][
layer.name] = TorchCovarianceMatrix(device=self.device,
save_data=save_data)
self.logs[f'{training_state}-{stat}'][layer.name].update(
activations_batch, lstm_ae)
def register_forward_hooks(self, layer: torch.nn.Module, stats: list):
"""Register hook to show `stats` in `layer`."""
def record_layer_saturation(layer: torch.nn.Module, input, output):
"""Hook to register in `layer` module."""
if not self.record:
if layer.name not in self.logs[
f'{"train" if layer.training else "eval"}-{"covariance-matrix"}']:
# save_data = 'embed' in self.stats
self.logs[
f'{"train" if layer.training else "eval"}-{"covariance-matrix"}'][
layer.name] = np.nan
return
# Increment step counter
layer.forward_iter += 1
# VAE output is a tuple; Hence output.data throw exception
lstm_ae = False
if layer.name in [
'encoder_lstm', 'encoder_output', 'decoder_lstm',
'decoder_output'
]:
output = output[1][0]
lstm_ae = True
elif isinstance(layer, torch.nn.LSTM):
output = output[0]
training_state = 'train' if layer.training else 'eval'
if layer.name not in self.seen_samples[training_state]:
self.seen_samples[training_state][layer.name] = 0
if (self.max_samples is None
or self.seen_samples[training_state][layer.name] <
self.max_samples
) and layer.forward_iter % self.log_interval == 0:
num_samples = min(
output.data.shape[0], self.max_samples -
self.seen_samples[training_state][layer.name]
) if self.max_samples is not None else output.data.shape[0]
activations_batch = output.data[:num_samples]
self.seen_samples[training_state][layer.name] += num_samples
self._record_stat(activations_batch, lstm_ae, layer,
training_state, 'covariance-matrix')
layer.register_forward_hook(record_layer_saturation)
def add_saturations(self, save=True):
"""
Computes saturation and saves all stats
:return:
"""
for key in self.logs:
train_sats = []
val_sats = []
for i, layer_name in enumerate(self.logs[key]):
if layer_name in self.ignore_layer_names:
continue
if self.record and self.logs[key][layer_name]._cov_mtx is None:
raise ValueError("Attempting to compute intrinsic"
"dimensionality when covariance"
"is not initialized")
if self.record:
cov_mat = self.logs[key][layer_name].fix()
log_values = {}
sample_log_values = {}
for stat in self.stats:
if stat == 'lsat':
log_values[key.replace(STATMAP['cov'], STATMAP['lsat'])
+ '_' + layer_name] = compute_saturation(
cov_mat, thresh=self.threshold
) if self.record else np.nan
elif stat == 'idim':
log_values[
key.replace(STATMAP['cov'], STATMAP['idim']) +
'_' +
layer_name] = compute_intrinsic_dimensionality(
cov_mat, thresh=self.threshold
) if self.record else np.nan
elif stat == 'cov':
log_values[key + '_' +
layer_name] = cov_mat.cpu().numpy()
elif stat == 'det':
log_values[key.replace(STATMAP['cov'], STATMAP['det'])
+ '_' +
layer_name] = compute_cov_determinant(
cov_mat) if self.record else np.nan
elif stat == 'trc':
log_values[key.replace(STATMAP['cov'], STATMAP['trc'])
+ '_' +
layer_name] = compute_cov_trace(cov_mat)
elif stat == 'dtrc':
log_values[key.replace(STATMAP['cov'], STATMAP['dtrc'])
+ '_' +
layer_name] = compute_diag_trace(cov_mat)
elif stat == 'embed':
transformation_matrix = torch.mm(
cov_mat[0:2].transpose(0, 1), cov_mat[0:2])
saved_samples = self.logs[key][
layer_name].saved_samples
sample_log_values['embed'] = list()
for (index, sample) in enumerate(saved_samples):
coord = torch.matmul(transformation_matrix, sample)
sample_log_values['embed'].append(
(coord[0], coord[1]))
self.seen_samples[key.split('-')[0]][layer_name] = 0
if self.reset_covariance and self.record:
self.logs[key][layer_name]._cov_mtx = None
if self.layerwise_sat:
self.writer.add_scalars(
prefix='',
value_dict=log_values,
sample_value_dict=sample_log_values)
if self.average_sat:
self.writer.add_scalar('average-train-sat', np.mean(train_sats))
self.writer.add_scalar('average-eval-sat', np.mean(val_sats))
if save:
self.save()
def save(self):
self.writer.save()
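# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; the toy model, the paths, the
# writer choice and the "fake" training loop are assumptions, not part of the
# original module).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn as nn

    toy_model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 2))
    tracker = CheckLayerSat(savefile="./saturation_demo",
                            save_to="csv",
                            modules=toy_model,
                            stats=["lsat", "idim"],
                            device="cpu")
    for _ in range(3):                  # stand-in for real training epochs
        toy_model(torch.randn(64, 10))  # forward hooks record activations
        tracker.add_saturations()       # compute and log the epoch's stats
    tracker.close()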
|
the-stack_0_11790 | from input_output.Loader import Loader
from joblib import load
# Loader specific for the Titanic task
# The loader loads the data
class TitanicLoader(Loader):
def load_split(self, training_data_file, test_data_file, verbose=False):
train, test = self.load_data(training_data_file, test_data_file)
test_labels = test['PassengerId']
X_train, Y_train = self.split_data(train)
if verbose:
print( "\n" + ('-' * 40) )
print( " Original data")
print( '-' * 40)
print( X_train.head() )
print ("Loaded dataset")
return X_train, Y_train, test, test_labels
def split_data(self, train):
# split the features and predector feature
train_X = train
train_Y = train_X["Survived"]
del train_X["Survived"]
return train_X, train_Y
def load_pkl(self, file_name):
return load(file_name)
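# Usage sketch (added): the CSV paths are placeholders for the Kaggle Titanic
# files; load_data itself is inherited from the Loader base class.
#
#   loader = TitanicLoader()
#   X_train, Y_train, test, test_ids = loader.load_split("train.csv", "test.csv", verbose=True)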
|
the-stack_0_11791 | # coding: utf-8
import pprint
import re
import six
class StartRecyclePolicyRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'body': 'RecyclePolicyRequestBody'
}
attribute_map = {
'x_language': 'X-Language',
'body': 'body'
}
def __init__(self, x_language=None, body=None):
"""StartRecyclePolicyRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this StartRecyclePolicyRequest.
:return: The x_language of this StartRecyclePolicyRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this StartRecyclePolicyRequest.
:param x_language: The x_language of this StartRecyclePolicyRequest.
:type: str
"""
self._x_language = x_language
@property
def body(self):
"""Gets the body of this StartRecyclePolicyRequest.
:return: The body of this StartRecyclePolicyRequest.
:rtype: RecyclePolicyRequestBody
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this StartRecyclePolicyRequest.
:param body: The body of this StartRecyclePolicyRequest.
:type: RecyclePolicyRequestBody
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StartRecyclePolicyRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_11793 | from somerandomapi.sync_async_handler import SyncAsyncHandler
from somerandomapi import http
def welcome(
key: str,
image: int,
background: str,
type: str,
avatar: str,
username: str,
discriminator: int,
guild_name: str,
text_color: str,
member_count: int,
):
"""
Docs: https://some-random-api.ml/docs/canvas/welcome
- key
- This endpoint requires a key to use but even if the key is expired it is fine.
- image
- It must be between 1 and 7.
- background
- Must be one of these:
- stars
- stars2
- rainbowgradient
- rainbow
- sunset
- night
- blobday
- blobnight
- space
- gaming1
- gaming3
- gaming2
- gaming4
- type
- Could be either `join` or `leave`
- avatar
- username
- Maximum 30 characters
- discriminator
- guild_name
- text_color
- member_count
"""
return SyncAsyncHandler(
get_welcome,
async_get_welcome,
key=key,
image=image,
background=background,
type=type,
avatar=avatar,
username=username,
discriminator=discriminator,
guildName=guild_name,
textcolor=text_color,
memberCount=member_count,
)
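# Usage sketch (added): the key, avatar URL and guild details below are
# placeholders. The returned SyncAsyncHandler can be called synchronously or
# awaited, matching the rest of this library.
#
#   handler = welcome(key="...", image=1, background="stars", type="join",
#                     avatar="https://example.com/avatar.png", username="Someone",
#                     discriminator=1234, guild_name="My Server",
#                     text_color="white", member_count=42)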
class Query:
def __init__(self, queries):
self.__dict__.update(queries)
async def async_get_welcome(**queries):
query = Query(queries)
queries.pop("image", None)
queries.pop("background", None)
async with http.GET(
("welcome", "img", str(query.image), query.background), queries
) as response:
return response
def get_welcome(**queries):
query = Query(queries)
queries.pop("image", None)
queries.pop("background", None)
with http.GET(("img", query.image, query.background), queries) as response:
return response
|
the-stack_0_11794 | import os
import json
import nltk
import random
import re
classes_under_consideration = ['ynQuestion','whQuestion','Greet','Statement','Emotion']
out_dir = './../res/data/nps_chat_dataset'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
posts = nltk.corpus.nps_chat.xml_posts()[:]
dataset = {}
for post in posts:
_class = post.get('class')
if _class not in classes_under_consideration:
continue
text = " "
for word in nltk.word_tokenize(post.text):
if not re.search('user', word, re.IGNORECASE):
text = text + " " + word.lower()
text = text.strip()
if dataset.get(_class) == None:
dataset[_class] = []
if _class not in ['ynQuestion','whQuestion'] and len(text) > 3:
dataset[_class].append(text)
elif _class in ['ynQuestion','whQuestion']:
dataset[_class].append(text)
for _class, texts in dataset.items():
texts = random.sample(texts,533)
file_name = '{}.txt'.format(_class)
with open(os.path.join(out_dir,file_name), 'w') as f:
f.write('\n'.join(texts)) |
the-stack_0_11795 | # %%
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from ..config import (
device,
experiment_folder,
second_stage,
second_stage_dataset,
)
from ..model import LanguageGenerator, SentenceDecoderWithAttention, TermEncoder
from .misc import extract_caption_len
# In case of "RuntimeError: received 0 items of ancdata"
# https://github.com/pytorch/pytorch/issues/973
# torch.multiprocessing.set_sharing_strategy("file_system")
def train(model, dataset, mapping, criterion, optimizer, writer, epoch):
dataloader = DataLoader(
dataset, batch_size=second_stage["batch_size"], num_workers=4, shuffle=True
)
model = model.train().to(device)
running_loss = 0
for i, data in enumerate(tqdm(dataloader, desc="Batches")):
caps, terms = data
caps, terms = torch.stack(caps).to(device), torch.stack(terms).to(device)
caps, clens = extract_caption_len(caps.T)
terms, tlens = extract_caption_len(terms.T)
targets = caps.detach().clone()[:, 1:]
optimizer.zero_grad()
out, hidden, attn = model(terms, tlens, caps[:, :-1], clens + 1) # add <start>
loss = criterion(out.permute(0, 2, 1), targets)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 50 == 49:
step_number = epoch * len(dataloader) + i
writer.add_scalar("Training loss", running_loss / 50, step_number)
running_loss = 0
return model
def main():
dataset = second_stage_dataset()
writer = SummaryWriter(experiment_folder)
cmapping, tmapping = dataset.get_cap_mapping, dataset.get_term_mapping
enc = TermEncoder(len(tmapping), 2048)
dec = SentenceDecoderWithAttention(len(cmapping), 2048, len(cmapping))
lang = LanguageGenerator(enc, dec)
criterion = nn.NLLLoss(ignore_index=0)
optimizer = torch.optim.Adam(lang.parameters(), lr=second_stage["learning_rate"])
for i in range(second_stage["epochs"]):
print(f"Epoch {i}")
lang = train(lang, dataset, cmapping, criterion, optimizer, writer, i)
torch.save(lang.state_dict(), experiment_folder / f"language_ep{i:03d}.pth")
if __name__ == "__main__":
main()
# %%
|
the-stack_0_11797 | #!/usr/bin/env python
import colorsys
import math
import time
import unicornhathd
print("""Unicorn HAT HD: demo.py
This pixel shading demo transitions between 4 classic graphics demo effects.
Press Ctrl+C to exit!
""")
unicornhathd.rotation(0)
u_width, u_height = unicornhathd.get_shape()
# Generate a lookup table for 8bit hue to RGB conversion
hue_to_rgb = []
for i in range(0, 255):
hue_to_rgb.append(colorsys.hsv_to_rgb(i / 255.0, 1, 1))
def gradient(x, y, step):
g = x * 16
b = y * 16
r = 255 - (x * 16)
return (r, g, b)
# twisty swirly goodness
def swirl(x, y, step):
x -= (u_width / 2)
y -= (u_height / 2)
dist = math.sqrt(pow(x, 2) + pow(y, 2)) / 2.0
angle = (step / 10.0) + (dist * 1.5)
s = math.sin(angle)
c = math.cos(angle)
xs = x * c - y * s
ys = x * s + y * c
r = abs(xs + ys)
r = r * 12.0
r -= 20
return (r, r + (s * 130), r + (c * 130))
# roto-zooming checker board
def checker(x, y, step):
x -= (u_width / 2)
y -= (u_height / 2)
angle = (step / 10.0)
s = math.sin(angle)
c = math.cos(angle)
xs = x * c - y * s
ys = x * s + y * c
xs -= math.sin(step / 200.0) * 40.0
ys -= math.cos(step / 200.0) * 40.0
scale = step % 20
scale /= 20
scale = (math.sin(step / 50.0) / 8.0) + 0.25
xs *= scale
ys *= scale
xo = abs(xs) - int(abs(xs))
yo = abs(ys) - int(abs(ys))
v = 0 if (math.floor(xs) + math.floor(ys)) % 2 else 1 if xo > .1 and yo > .1 else .5
r, g, b = hue_to_rgb[step % 255]
return (r * (v * 255), g * (v * 255), b * (v * 255))
# weeee waaaah
def blues_and_twos(x, y, step):
x -= (u_width / 2)
y -= (u_height / 2)
scale = math.sin(step / 6.0) / 1.5
r = math.sin((x * scale) / 1.0) + math.cos((y * scale) / 1.0)
b = math.sin(x * scale / 2.0) + math.cos(y * scale / 2.0)
g = r - .8
g = 0 if g < 0 else g
b -= r
b /= 1.4
return (r * 255, (b + g) * 255, g * 255)
# rainbow search spotlights
def rainbow_search(x, y, step):
xs = math.sin((step) / 100.0) * 20.0
ys = math.cos((step) / 100.0) * 20.0
scale = ((math.sin(step / 60.0) + 1.0) / 5.0) + 0.2
r = math.sin((x + xs) * scale) + math.cos((y + xs) * scale)
g = math.sin((x + xs) * scale) + math.cos((y + ys) * scale)
b = math.sin((x + ys) * scale) + math.cos((y + ys) * scale)
return (r * 255, g * 255, b * 255)
# zoom tunnel
def tunnel(x, y, step):
speed = step / 100.0
x -= (u_width / 2)
y -= (u_height / 2)
xo = math.sin(step / 27.0) * 2
yo = math.cos(step / 18.0) * 2
x += xo
y += yo
if y == 0:
if x < 0:
angle = -(math.pi / 2)
else:
angle = (math.pi / 2)
else:
angle = math.atan(x / y)
if y > 0:
angle += math.pi
angle /= 2 * math.pi # convert angle to 0...1 range
hyp = math.sqrt(math.pow(x, 2) + math.pow(y, 2))
shade = hyp / 2.1
shade = 1 if shade > 1 else shade
angle += speed
depth = speed + (hyp / 10)
col1 = hue_to_rgb[step % 255]
col1 = (col1[0] * 0.8, col1[1] * 0.8, col1[2] * 0.8)
col2 = hue_to_rgb[step % 255]
col2 = (col2[0] * 0.3, col2[1] * 0.3, col2[2] * 0.3)
col = col1 if int(abs(angle * 6.0)) % 2 == 0 else col2
td = .3 if int(abs(depth * 3.0)) % 2 == 0 else 0
col = (col[0] + td, col[1] + td, col[2] + td)
col = (col[0] * shade, col[1] * shade, col[2] * shade)
return (col[0] * 255, col[1] * 255, col[2] * 255)
def current_milli_time():
return int(round(time.time() * 1000))
effects = [gradient, tunnel, rainbow_search, checker, swirl]
step = 0
try:
while True:
for i in range(100):
start = current_milli_time()
for y in range(u_height):
for x in range(u_width):
r, g, b = effects[0](x, y, step)
if i > 75:
r2, g2, b2 = effects[-1](x, y, step)
ratio = (100.00 - i) / 25.0
r = r * ratio + r2 * (1.0 - ratio)
g = g * ratio + g2 * (1.0 - ratio)
b = b * ratio + b2 * (1.0 - ratio)
r = int(max(0, min(255, r)))
g = int(max(0, min(255, g)))
b = int(max(0, min(255, b)))
unicornhathd.set_pixel(x, y, r, g, b)
step += 2
unicornhathd.show()
effect = effects.pop()
effects.insert(0, effect)
except KeyboardInterrupt:
unicornhathd.off()
|
the-stack_0_11799 | #!/usr/bin/env python
import sys, os , socket, random, struct, time
import argparse
from scapy.all import sendp, send, get_if_list, get_if_hwaddr, bind_layers
from scapy.all import Packet
from scapy.all import Ether, IP, UDP, TCP, Raw
from scapy.fields import *
SRC = 0
DST = 1
DSCP = 2
BOS = 0
LABEL1 = 1
SWITCH_ID = 0
TIMESTAMP = 1
parser = argparse.ArgumentParser(description='Craft and send packets with optional MPLS, INT, TCP/UDP headers and payload.')
parser.add_argument('-e', '--ethernet', type=str, help='Ethernet src/dst addresses')
parser.add_argument('-m', '--mpls', type=str, help='Enable MPLS header and add parameters')
parser.add_argument('-i', '--ip', type=str, help='Add IPv4 parameters')
parser.add_argument('-t', '--tcp', type=int, action='store', help='Enable TCP header and add parameters')
parser.add_argument('-u', '--udp', type=int, action='store', help='Enable UDP header and add parameters')
parser.add_argument('-p', '--packets', type=int, action='store', help='Number of packets to send')
parser.add_argument('-b', '--bytes', type=int, action='store', help='Bytes for the payload')
parser.add_argument('-r', '--randbytes', const=True, action='store_const', help='Add random bytes to the payload')
parser.add_argument('-f', '--filename', type=str, help='Path for the filename')
parser.add_argument('-c', '--interface', type=str, help='Name of the interface to send the packet to')
parser.add_argument('-n', '--int', type=str, help='Add INT header')
args = parser.parse_args()
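# Example invocation (added; script name, interface and addresses are
# placeholders):
#   sudo ./send_packets.py -c eth0 -e 00:00:00:00:01:01,00:00:00:00:02:02 \
#        -i 10.0.1.1,10.0.2.2,0x00 -u 4321 -n 1 -p 5 -b 64 -r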
class MPLS(Packet):
name = "MPLS"
fields_desc = [
BitField("label", 1000, 20),
BitField("exp", 0, 3),
BitField("bos", 1, 1),
ByteField("ttl", 0)
]
class INT(Packet):
name = "INT"
fields_desc = [
BitField("egress_timestamp", 5, 64) #name, default, size
]
bind_layers(Ether, IP, type=0x0800)
bind_layers(IP, INT, protocol=0xFE)
def main():
if args.ethernet:
ethernetParams = [p for p in args.ethernet.split(',')]
if args.ip:
ipParams = [p for p in args.ip.split(',')]
#outF = open(fileName, "a")
print("Sending packets on interface %s" % (args.interface))
pkt = Ether(src=ethernetParams[SRC], dst=ethernetParams[DST])
pkt = pkt / IP(src=ipParams[SRC], dst=ipParams[DST], tos=int(ipParams[DSCP], 0) << 2)
if args.int:
        pkt = pkt / INT(egress_timestamp = 7)  # the "/" operator appends the INT header to the packet
if args.udp:
pkt = pkt / UDP(sport=0, dport=args.udp)
if args.tcp:
pkt = pkt / TCP(sport=0, dport=args.tcp)
if args.bytes:
if args.randbytes:
pkt = pkt / Raw(load=bytearray(os.urandom(args.bytes)))
else:
pkt = pkt / Raw(load=bytearray([0] * args.bytes) )
for i in range(args.packets):
#pkt.show()
#t = time.time_ns()
if args.udp:
pkt[UDP].sport = i+1
if args.tcp:
pkt[TCP].sport = i+1
sendp(pkt, iface=args.interface, verbose=False)
print("Sent packet: " + str(i+1))
time.sleep(0.3)
if __name__ == '__main__':
main() |
the-stack_0_11800 | #%%
import numpy as np
import pandas as pd
import tqdm
import vdj.io
import vdj.bayes
import vdj.stats
# Load data and stan model
data = pd.read_csv('../../data/compiled_dwell_times.csv')
model = vdj.bayes.StanModel('../stan/pooled_exponential_sum.stan', force_compile=True)
#%%
# Iterate through the data and fit while storing thinned samples
samps_df = []
stats_df = []
for g, d in tqdm.tqdm(data.groupby(['mutant', 'salt'])):
_, samples = model.sample({'N':len(d), 'dwell':d['dwell_time_min']},
control=dict(adapt_delta=0.9), iter=5000)
stats = model.summary()
# Parse the mutant
mut = vdj.io.mutation_parser(g[0])
# Add identifiers and append
samples['mutant'] = g[0]
samples['seq'] = mut['seq']
samples['n_muts'] = mut['n_muts']
samples['salt'] = g[1]
stats['mutant'] = g[0]
stats['seq'] = mut['seq']
stats['n_muts'] = mut['n_muts']
stats['salt'] = g[1]
samps_df.append(samples.iloc[::10])
stats_df.append(stats)
# Concatenate and save the dataframes
samps = pd.concat(samps_df)
samps.to_csv('../../data/sum_expon_samples.csv', index=False)
stats = pd.concat(stats_df)
stats.to_csv('../../data/sum_expon_summary.csv', index=False)
#%%
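# Follow-up sketch (added): the thinned samples written above can be reloaded
# for downstream analysis, e.g.
#   samps = pd.read_csv('../../data/sum_expon_samples.csv')
#   samps.groupby(['mutant', 'salt']).size()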
|
the-stack_0_11801 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements triplet loss."""
import tensorflow as tf
from tensorflow_addons.losses import metric_learning
from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
from typeguard import typechecked
from typing import Optional, Union, Callable
def _masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = tf.math.reduce_min(data, dim, keepdims=True)
masked_maximums = (
tf.math.reduce_max(
tf.math.multiply(data - axis_minimums, mask), dim, keepdims=True
)
+ axis_minimums
)
return masked_maximums
def _masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = tf.math.reduce_max(data, dim, keepdims=True)
masked_minimums = (
tf.math.reduce_min(
tf.math.multiply(data - axis_maximums, mask), dim, keepdims=True
)
+ axis_maximums
)
return masked_minimums
@tf.keras.utils.register_keras_serializable(package="Addons")
@tf.function
def triplet_semihard_loss(
y_true: TensorLike,
y_pred: TensorLike,
margin: FloatTensorLike = 1.0,
distance_metric: Union[str, Callable] = "L2",
) -> tf.Tensor:
"""Computes the triplet loss with semi-hard negative mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
distance_metric: str or function, determines distance metric:
"L1" for l1-norm distance
"L2" for l2-norm distance
"angular" for cosine similarity
A custom function returning a 2d adjacency
matrix of a chosen distance metric can
also be passed here. e.g.
def custom_distance(batch):
batch = 1 - batch @ batch.T
return batch
            triplet_semihard_loss(labels, batch,
                                  distance_metric=custom_distance)
Returns:
triplet_loss: float scalar with dtype of y_pred.
"""
labels, embeddings = y_true, y_pred
convert_to_float32 = (
embeddings.dtype == tf.dtypes.float16 or embeddings.dtype == tf.dtypes.bfloat16
)
precise_embeddings = (
tf.cast(embeddings, tf.dtypes.float32) if convert_to_float32 else embeddings
)
# Reshape label tensor to [batch_size, 1].
lshape = tf.shape(labels)
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix
if distance_metric == "L1":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=False
)
elif distance_metric == "L2":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=True
)
elif distance_metric == "angular":
pdist_matrix = metric_learning.angular_distance(precise_embeddings)
else:
pdist_matrix = distance_metric(precise_embeddings)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
batch_size = tf.size(labels)
# Compute the mask.
pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])
mask = tf.math.logical_and(
tf.tile(adjacency_not, [batch_size, 1]),
tf.math.greater(
pdist_matrix_tile, tf.reshape(tf.transpose(pdist_matrix), [-1, 1])
),
)
mask_final = tf.reshape(
tf.math.greater(
tf.math.reduce_sum(
tf.cast(mask, dtype=tf.dtypes.float32), 1, keepdims=True
),
0.0,
),
[batch_size, batch_size],
)
mask_final = tf.transpose(mask_final)
adjacency_not = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
mask = tf.cast(mask, dtype=tf.dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = tf.reshape(
_masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size]
)
negatives_outside = tf.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = tf.tile(
_masked_maximum(pdist_matrix, adjacency_not), [1, batch_size]
)
semi_hard_negatives = tf.where(mask_final, negatives_outside, negatives_inside)
loss_mat = tf.math.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = tf.cast(adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size])
)
    # In lifted-struct, the authors multiply the loss by 0.5 for the upper
    # triangular part; in semihard, all positive pairs except the diagonal are used.
num_positives = tf.math.reduce_sum(mask_positives)
triplet_loss = tf.math.truediv(
tf.math.reduce_sum(
tf.math.maximum(tf.math.multiply(loss_mat, mask_positives), 0.0)
),
num_positives,
)
if convert_to_float32:
return tf.cast(triplet_loss, embeddings.dtype)
else:
return triplet_loss
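# Illustrative sketch (not part of the original TF-Addons code): a direct call
# of `triplet_semihard_loss`. Labels are integer class ids and embeddings are
# l2-normalized rows, as the docstring above requires; the batch size and
# embedding dimension are arbitrary choices for the example.
def _triplet_semihard_example():
    labels = tf.constant([0, 0, 1, 1])
    embeddings = tf.math.l2_normalize(tf.random.normal([4, 8]), axis=1)
    return triplet_semihard_loss(labels, embeddings, margin=1.0)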
@tf.keras.utils.register_keras_serializable(package="Addons")
@tf.function
def triplet_hard_loss(
y_true: TensorLike,
y_pred: TensorLike,
margin: FloatTensorLike = 1.0,
soft: bool = False,
distance_metric: Union[str, Callable] = "L2",
) -> tf.Tensor:
"""Computes the triplet loss with hard negative and hard positive mining.
Args:
y_true: 1-D integer `Tensor` with shape [batch_size] of
multiclass integer labels.
y_pred: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
soft: Boolean, if set, use the soft margin version.
distance_metric: str or function, determines distance metric:
"L1" for l1-norm distance
"L2" for l2-norm distance
"angular" for cosine similarity
A custom function returning a 2d adjacency
matrix of a chosen distance metric can
also be passed here. e.g.
def custom_distance(batch):
batch = 1 - batch @ batch.T
return batch
            triplet_hard_loss(batch, labels,
distance_metric=custom_distance
)
Returns:
triplet_loss: float scalar with dtype of y_pred.
"""
labels, embeddings = y_true, y_pred
convert_to_float32 = (
embeddings.dtype == tf.dtypes.float16 or embeddings.dtype == tf.dtypes.bfloat16
)
precise_embeddings = (
tf.cast(embeddings, tf.dtypes.float32) if convert_to_float32 else embeddings
)
# Reshape label tensor to [batch_size, 1].
lshape = tf.shape(labels)
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
if distance_metric == "L1":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=False
)
elif distance_metric == "L2":
pdist_matrix = metric_learning.pairwise_distance(
precise_embeddings, squared=True
)
elif distance_metric == "angular":
pdist_matrix = metric_learning.angular_distance(precise_embeddings)
else:
pdist_matrix = distance_metric(precise_embeddings)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
adjacency_not = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
# hard negatives: smallest D_an.
hard_negatives = _masked_minimum(pdist_matrix, adjacency_not)
batch_size = tf.size(labels)
adjacency = tf.cast(adjacency, dtype=tf.dtypes.float32)
mask_positives = tf.cast(adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size])
)
# hard positives: largest D_ap.
hard_positives = _masked_maximum(pdist_matrix, mask_positives)
if soft:
triplet_loss = tf.math.log1p(tf.math.exp(hard_positives - hard_negatives))
else:
triplet_loss = tf.maximum(hard_positives - hard_negatives + margin, 0.0)
# Get final mean triplet loss
triplet_loss = tf.reduce_mean(triplet_loss)
if convert_to_float32:
return tf.cast(triplet_loss, embeddings.dtype)
else:
return triplet_loss
@tf.keras.utils.register_keras_serializable(package="Addons")
class TripletSemiHardLoss(LossFunctionWrapper):
"""Computes the triplet loss with semi-hard negative mining.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than the minimum negative distance
    among those negatives that are at least greater than the positive distance
    plus the margin constant (the semi-hard negatives) in the mini-batch.
    If no such negative exists, the largest negative distance is used instead.
See: https://arxiv.org/abs/1503.03832.
We expect labels `y_true` to be provided as 1-D integer `Tensor` with shape
[batch_size] of multi-class integer labels. And embeddings `y_pred` must be
2-D float `Tensor` of l2 normalized embedding vectors.
Args:
margin: Float, margin term in the loss definition. Default value is 1.0.
name: Optional name for the op.
"""
@typechecked
def __init__(
self,
margin: FloatTensorLike = 1.0,
distance_metric: Union[str, Callable] = "L2",
name: Optional[str] = None,
**kwargs
):
super().__init__(
triplet_semihard_loss,
name=name,
reduction=tf.keras.losses.Reduction.NONE,
margin=margin,
distance_metric=distance_metric,
)
@tf.keras.utils.register_keras_serializable(package="Addons")
class TripletHardLoss(LossFunctionWrapper):
"""Computes the triplet loss with hard negative and hard positive mining.
The loss encourages the maximum positive distance (between a pair of embeddings
with the same labels) to be smaller than the minimum negative distance plus the
margin constant in the mini-batch.
The loss selects the hardest positive and the hardest negative samples
within the batch when forming the triplets for computing the loss.
See: https://arxiv.org/pdf/1703.07737.
We expect labels `y_true` to be provided as 1-D integer `Tensor` with shape
[batch_size] of multi-class integer labels. And embeddings `y_pred` must be
2-D float `Tensor` of l2 normalized embedding vectors.
Args:
margin: Float, margin term in the loss definition. Default value is 1.0.
soft: Boolean, if set, use the soft margin version. Default value is False.
name: Optional name for the op.
"""
@typechecked
def __init__(
self,
margin: FloatTensorLike = 1.0,
soft: bool = False,
distance_metric: Union[str, Callable] = "L2",
name: Optional[str] = None,
**kwargs
):
super().__init__(
triplet_hard_loss,
name=name,
reduction=tf.keras.losses.Reduction.NONE,
margin=margin,
soft=soft,
distance_metric=distance_metric,
)
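# Illustrative sketch (not part of the original TF-Addons code): typical Keras
# usage of the wrappers above. The model must output l2-normalized embeddings
# and the dataset must yield integer labels; `embedding_dim` is an arbitrary
# placeholder for the example.
def _compile_with_triplet_loss(embedding_dim=64):
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(embedding_dim, activation=None),
        tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1)),
    ])
    model.compile(optimizer="adam", loss=TripletSemiHardLoss(margin=1.0))
    return model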
|
the-stack_0_11803 | """empty message
Revision ID: 9b9102347500
Revises: 7ede01846a31
Create Date: 2019-08-14 12:26:40.368422
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9b9102347500'
down_revision = '7ede01846a31'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('study', sa.Column('image_url', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('study', 'image_url')
# ### end Alembic commands ###
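# Illustrative note: with a standard Alembic environment this revision is
# applied or rolled back from the command line, e.g.
#   alembic upgrade 9b9102347500
#   alembic downgrade 7ede01846a31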
|
the-stack_0_11806 | """market_access_python_frontend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
urlpatterns = []
if settings.DEBUG and settings.DJANGO_ENV == "local":
urlpatterns += [
path("admin/", admin.site.urls),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += [
path("", include("users.urls", namespace="users")),
path("", include("barriers.urls", namespace="barriers")),
path("", include("reports.urls", namespace="reports")),
path("", include("core.urls", namespace="core")),
path("", include("healthcheck.urls", namespace="healthcheck")),
]
|
the-stack_0_11807 | import flavio
from wilson import Wilson
import wcxf
from flavio.statistics.likelihood import Likelihood, FastLikelihood
from flavio.statistics.probability import NormalDistribution
from flavio.statistics.functions import pull, pvalue
import warnings
import pandas as pd
import numpy as np
from collections import OrderedDict
from math import ceil
from .util import tree, get_datapath
from . import ckm
from multipledispatch import dispatch
from copy import copy
import os
# by default, smelli uses leading log accuracy for SMEFT running!
Wilson.set_default_option('smeft_accuracy', 'leadinglog')
class GlobalLikelihood(object):
"""Class that provides a global likelihood in SMEFT Wilson
coefficient space.
User methods:
    - `log_likelihood`: return an instance of LikelihoodResult
      given a dictionary of Wilson coefficients at a given scale
    - `log_likelihood_wcxf`: return an instance of LikelihoodResult
      given the path to a WCxf file
    - `log_likelihood_wilson`: return an instance of LikelihoodResult
given an instance of `wilson.Wilson`
Utility methods:
- `make_measurement`: compute the SM covariances. Note that it is only
necessary to call this method when changes to the default
parameters/uncertainties have been made
- `save_sm_covariances`, `load_sm_covariances`: Save the calculated SM
covariances or load them from data files
- `save_exp_covariances`, `load_exp_covariances`: Save the calculated
experimental central values and covariances or load them from data files
"""
_default_bases = {'SMEFT': 'Warsaw', 'WET': 'flavio'}
_fast_likelihoods_yaml = [
'fast_likelihood_quarks.yaml',
'fast_likelihood_leptons.yaml'
]
_likelihoods_yaml = [
'likelihood_ewpt.yaml',
'likelihood_lept.yaml',
'likelihood_rd_rds.yaml',
'likelihood_lfu_fccc.yaml',
'likelihood_lfu_fcnc.yaml',
'likelihood_bcpv.yaml',
'likelihood_bqnunu.yaml',
'likelihood_lfv.yaml',
'likelihood_zlfv.yaml',
]
def __init__(self, eft='SMEFT', basis=None,
par_dict=None,
include_likelihoods=None,
exclude_likelihoods=None,
Nexp=5000,
exp_cov_folder=None,
sm_cov_folder=None,
custom_likelihoods=None,
fix_ckm=False):
"""Initialize the likelihood.
Optionally, a dictionary of parameters can be passed as `par_dict`.
If not given (or not complete), flavio default parameter values will
be used. Note that the CKM elements in `par_dict` will be ignored as
the "true" CKM elements will be extracted for each parameter point
from the measurement of four input observables:
- `'RKpi(P+->munu)'`
- `'BR(B+->taunu)'`
- `'BR(B->Xcenu)'`
- `'DeltaM_d/DeltaM_s'`
Parameters:
- eft: a WCxf EFT, must be one of 'SMEFT' (default) or 'WET'.
- basis: a WCxf basis, defaults to 'Warsaw' for SMEFT and 'flavio'
for WET.
- include_likelihoods: a list of strings specifying the likelihoods
to be included (default: all of them). Note that this cannot be used
to add likelihoods.
- exclude_likelihoods: a list of strings specifying the likelihoods
to be excluded (default: none of them).
- Nexp: number of random evaluations of the experimental likelihood
used to extract the covariance matrix for "fast likelihood"
instances. Defaults to 5000.
        - exp_cov_folder: directory containing saved experimental
covariances. The data files have to be in the format exported by
`save_exp_covariances`.
- sm_cov_folder: directory containing saved SM
covariances. The data files have to be in the format exported by
`save_sm_covariances`.
- custom_likelihoods: a dictionary in which each value is a list of
observables and each key is a string that serves as user-defined
name. For each item of the dictionary, a custom likelihood will be
computed.
- fix_ckm: If False (default), automatically determine the CKM elements
in the presence of new physics in processes used to determine these
elements in the SM. If set to True, the CKM elements are fixed to
their SM values, which can lead to inconsistent results, but also
to a significant speedup in specific cases.
"""
self.eft = eft
self.basis = basis or self._default_bases[self.eft]
par_dict = par_dict or {} # initialize empty if not given
# take missing parameters from flavio defaults
self.par_dict_default = flavio.default_parameters.get_central_all()
self.par_dict_default.update(par_dict)
self._par_dict_sm = None
self.fix_ckm = fix_ckm
self.likelihoods = {}
self.fast_likelihoods = {}
self._custom_likelihoods_dict = custom_likelihoods or {}
self.custom_likelihoods = {}
self._load_likelihoods(include_likelihoods=include_likelihoods,
exclude_likelihoods=exclude_likelihoods)
self._Nexp = Nexp
if exp_cov_folder is not None:
self.load_exp_covariances(exp_cov_folder)
self._sm_cov_loaded = False
try:
if sm_cov_folder is None:
self.load_sm_covariances(get_datapath('smelli', 'data/cache'))
else:
self.load_sm_covariances(sm_cov_folder)
self._sm_cov_loaded = True
self.make_measurement()
except (KeyboardInterrupt, SystemExit):
raise
except:
warnings.warn("There was a problem loading the SM covariances. "
"Please recompute them with `make_measurement`.")
self._log_likelihood_sm = None
self._obstable_sm = None
def _load_likelihoods(self,
include_likelihoods=None,
exclude_likelihoods=None):
if include_likelihoods is not None and exclude_likelihoods is not None:
raise ValueError("include_likelihoods and exclude_likelihoods "
"should not be specified simultaneously.")
for fn in self._fast_likelihoods_yaml:
if include_likelihoods is not None and fn not in include_likelihoods:
continue
if exclude_likelihoods is not None and fn in exclude_likelihoods:
continue
with open(self._get_likelihood_path(fn), 'r') as f:
L = FastLikelihood.load(f)
self.fast_likelihoods[fn] = L
for fn in self._likelihoods_yaml:
if include_likelihoods is not None and fn not in include_likelihoods:
continue
if exclude_likelihoods is not None and fn in exclude_likelihoods:
continue
if self.eft != 'SMEFT' and fn in ['likelihood_ewpt.yaml',
'likelihood_zlfv.yaml',]:
continue
with open(self._get_likelihood_path(fn), 'r') as f:
L = Likelihood.load(f)
self.likelihoods[fn] = L
for name, observables in self._custom_likelihoods_dict.items():
L = CustomLikelihood(self, observables)
self.custom_likelihoods['custom_' + name] = L
def _get_likelihood_path(self, name):
"""Return a path for the likelihood specified by `name`.
If a YAML file with that name is found in the package's data
directory, that is used. Otherwise, `name` is assumed to be a path.
Raises `FileNotFoundError` if path does not exists.
"""
path = get_datapath('smelli', 'data/yaml/' + name)
if os.path.exists(path):
return path
path = get_datapath('smelli', 'data/yaml/' + name + '.yaml')
if os.path.exists(path):
return path
if os.path.exists(name):
return name
if os.path.exists(name + '.yaml'):
return name + '.yaml'
else:
raise FileNotFoundError("Likelihood YAML file '{}' was not found".format(name))
def make_measurement(self, *args, **kwargs):
"""Initialize the likelihood by producing a pseudo-measurement containing both
experimental uncertainties as well as theory uncertainties stemming
from nuisance parameters.
Optional parameters:
- `N`: number of random computations for the SM covariance (computing
time is proportional to it; more means less random fluctuations.)
- `Nexp`: number of random computations for the experimental covariance.
This is much less expensive than the theory covariance, so a large
number can be afforded (default: 5000).
- `threads`: number of parallel threads for the SM
covariance computation. Defaults to 1 (no parallelization).
- `force`: if True, will recompute SM covariance even if it
already has been computed. Defaults to False.
- `force_exp`: if True, will recompute experimental central values and
covariance even if they have already been computed. Defaults to False.
"""
if 'Nexp' not in kwargs:
kwargs['Nexp'] = self._Nexp
for name, flh in self.fast_likelihoods.items():
flh.make_measurement(*args, **kwargs)
self._sm_cov_loaded = True
def save_sm_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.sm_covariance.save(filename)
def load_sm_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.sm_covariance.load(filename)
def save_exp_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.exp_covariance.save(filename)
def load_exp_covariances(self, folder):
for name, flh in self.fast_likelihoods.items():
filename = os.path.join(folder, name + '.p')
flh.exp_covariance.load(filename)
@property
def log_likelihood_sm(self):
if self._log_likelihood_sm is None:
self._log_likelihood_sm = self._log_likelihood(self.par_dict_sm, flavio.WilsonCoefficients())
return self._log_likelihood_sm
def _check_sm_cov_loaded(self):
"""Check if the SM covariances have been computed or loaded."""
if not self._sm_cov_loaded:
raise ValueError("Please load or compute the SM covariances first"
" by calling `make_measurement`.")
def get_ckm_sm(self):
scheme = ckm.CKMSchemeRmuBtaunuBxlnuDeltaM()
Vus, Vcb, Vub, delta = scheme.ckm_np(w=None)
return {'Vus': Vus, 'Vcb': Vcb, 'Vub': Vub, 'delta': delta}
@property
def par_dict_sm(self):
"""Return the dictionary of parameters where the four CKM parameters
`Vus`, `Vcb`, `Vub`, `delta` have been replaced by their
"true" values extracted assuming the SM.
They should be almost (but not exactly) equal to the default
flavio CKM parameters."""
if self._par_dict_sm is None:
par_dict_sm = self.par_dict_default.copy()
par_dict_sm.update(self.get_ckm_sm())
self._par_dict_sm = par_dict_sm
return self._par_dict_sm
@property
def obstable_sm(self):
self._check_sm_cov_loaded()
if self._obstable_sm is None:
info = tree() # nested dict
for flh_name, flh in self.fast_likelihoods.items():
# loop over fast likelihoods: they only have a single "measurement"
m = flh.pseudo_measurement
ml = flh.full_measurement_likelihood
pred_sm = ml.get_predictions_par(self.par_dict_sm,
flavio.WilsonCoefficients())
sm_cov = flh.sm_covariance.get(force=False)
_, exp_cov = flh.exp_covariance.get(force=False)
inspire_dict = self._get_inspire_dict(flh.observables, ml)
for i, obs in enumerate(flh.observables):
info[obs]['lh_name'] = flh_name
info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
info[obs]['th. unc.'] = np.sqrt(sm_cov[i, i])
info[obs]['experiment'] = m.get_central(obs)
info[obs]['exp. unc.'] = np.sqrt(exp_cov[i, i])
info[obs]['exp. PDF'] = NormalDistribution(m.get_central(obs), np.sqrt(exp_cov[i, i]))
info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
info[obs]['ll_sm'] = m.get_logprobability_single(obs, pred_sm[obs])
info[obs]['ll_central'] = m.get_logprobability_single(obs, m.get_central(obs))
for lh_name, lh in self.likelihoods.items():
# loop over "normal" likelihoods
ml = lh.measurement_likelihood
pred_sm = ml.get_predictions_par(self.par_dict_sm,
flavio.WilsonCoefficients())
inspire_dict = self._get_inspire_dict(lh.observables, ml)
for i, obs in enumerate(lh.observables):
obs_dict = flavio.Observable.argument_format(obs, 'dict')
obs_name = obs_dict.pop('name')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
p_comb = flavio.combine_measurements(
obs_name,
include_measurements=ml.get_measurements,
**obs_dict)
info[obs]['experiment'] = p_comb.central_value
info[obs]['exp. unc.'] = max(p_comb.error_left, p_comb.error_right)
info[obs]['exp. PDF'] = p_comb
info[obs]['inspire'] = sorted(set(inspire_dict[obs]))
info[obs]['th. unc.'] = 0
info[obs]['lh_name'] = lh_name
info[obs]['name'] = obs if isinstance(obs, str) else obs[0]
info[obs]['ll_sm'] = p_comb.logpdf([pred_sm[obs]])
info[obs]['ll_central'] = p_comb.logpdf([p_comb.central_value])
self._obstable_sm = info
return self._obstable_sm
def get_wilson(self, wc_dict, scale):
return Wilson(wc_dict, scale=scale, eft=self.eft, basis=self.basis)
def _log_likelihood(self, par_dict, w):
"""Return the log-likelihood as a dictionary for an instance of
`wilson.Wilson`."""
ll = {}
for name, flh in self.fast_likelihoods.items():
ll[name] = flh.log_likelihood(par_dict, w, delta=True)
for name, lh in self.likelihoods.items():
ll[name] = lh.log_likelihood(par_dict, w, delta=True)
for name, clh in self.custom_likelihoods.items():
ll[name] = clh.log_likelihood(par_dict, w, delta=True)
return ll
@dispatch(dict)
def parameter_point(self, wc_dict, scale=None):
"""Choose a point in parameter space by providing a dictionary of
Wilson coefficient values (with keys corresponding to WCxf Wilson
coefficient names) and the input scale."""
if not scale:
raise ValueError("You need to provide a scale")
w = self.get_wilson(wc_dict, scale)
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@dispatch(dict, (int, float))
def parameter_point(self, wc_dict, scale):
"""Choose a point in parameter space by providing a dictionary of
Wilson coefficient values (with keys corresponding to WCxf Wilson
coefficient names) and the input scale."""
w = self.get_wilson(wc_dict, scale)
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@dispatch(str)
def parameter_point(self, filename):
"""Choose a point in parameter space by providing the path to a WCxf
file."""
with open(filename, 'r') as f:
wc = wcxf.WC.load(f)
w = Wilson.from_wc(wc)
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@dispatch(Wilson)
def parameter_point(self, w):
"""Choose a point in parameter space by providing an instance
of `wilson.Wilson`."""
return GlobalLikelihoodPoint(self, w, fix_ckm=self.fix_ckm)
@staticmethod
def _get_inspire_dict(observables, ml):
inspire_dict = {}
obs_set = set(observables)
for m_name in ml.get_measurements:
m_obj = flavio.Measurement[m_name]
for obs in set(m_obj.all_parameters) & obs_set:
if obs in inspire_dict:
inspire_dict[obs].append(m_obj.inspire)
else:
inspire_dict[obs]=[m_obj.inspire]
return inspire_dict
def number_observations_dict(self, exclude_observables=None):
"""Get a dictionary of the number of "observations" for each
sublikelihood.
        Here, an "observation" is defined as an individual measurement
of an observable. Thus, the number of observations is always
>= the number of observables.
"""
nobs_dict = {}
for name, flh in self.fast_likelihoods.items():
nobs_dict[name] = len(set(flh.observables) - set(exclude_observables or []))
for name, lh in self.likelihoods.items():
ml = lh.measurement_likelihood
nobs_dict[name] = ml.get_number_observations(
exclude_observables=exclude_observables
)
for name, clh in self.custom_likelihoods.items():
nobs_dict[name] = clh.get_number_observations()
nobs_dict['global'] = sum([v for k, v in nobs_dict.items() if 'custom_' not in k])
return nobs_dict
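# Illustrative sketch (not part of the original smelli code): constructing a
# restricted GlobalLikelihood. The YAML names are taken from the class
# attributes above; the commented make_measurement() call (arguments as in its
# docstring) is only needed if the cached SM covariances could not be loaded.
def _example_restricted_likelihood():
    gl = GlobalLikelihood(
        eft='SMEFT',
        basis='Warsaw',
        include_likelihoods=['fast_likelihood_quarks.yaml',
                             'likelihood_lfu_fccc.yaml'],
    )
    # gl.make_measurement(N=100, threads=4)
    return gl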
class CustomLikelihood(object):
def __init__(self, likelihood, observables):
self.likelihood = likelihood
self.observables = observables
self.exclude_obs = self._get_exclude_obs_dict()
def _get_exclude_obs_dict(self):
"""Get a dictionary with observables to be excluded from each
(Fast)Likelihood instance."""
exclude_obs = {}
for lhs_or_flhs in (self.likelihood.likelihoods,
self.likelihood.fast_likelihoods):
for lh_name, lh in lhs_or_flhs.items():
exclude_observables = set(lh.observables) - set(self.observables)
if set(lh.observables) != exclude_observables:
exclude_obs[lh_name] = exclude_observables
return exclude_obs
def log_likelihood(self, par_dict, wc_obj, delta=False):
custom_log_likelihood = 0
for lh_name, exclude_observables in self.exclude_obs.items():
lh = (self.likelihood.fast_likelihoods.get(lh_name)
or self.likelihood.likelihoods.get(lh_name))
custom_log_likelihood += lh.log_likelihood(
par_dict, wc_obj, delta=delta,
exclude_observables=exclude_observables
)
return custom_log_likelihood
def get_number_observations(self):
"""Get the number of observations, defined as individual measurements
of observables."""
nobs = 0
for llh_name, exclude_observables in self.exclude_obs.items():
if llh_name in self.likelihood.fast_likelihoods:
flh = self.likelihood.fast_likelihoods[llh_name]
nobs += len(set(flh.observables) - set(exclude_observables or []))
else:
lh = self.likelihood.likelihoods[llh_name]
ml = lh.measurement_likelihood
nobs += ml.get_number_observations(
exclude_observables=exclude_observables)
return nobs
class GlobalLikelihoodPoint(object):
"""Class representing the properties of the likelihood function at a
specific point in parameter space.
Attributes:
- `log_likelihood_dict`: dictionary with individual contributions
to the log-likelihood
- `value`: Return the numerical values of the global log-likelihood
      compared to the SM value (can also be accessed with `float(self)`)
Methods:
- `get_obstable`: return a pandas data frame with the values and pulls
for each individual observable, given the Wilson coefficients
"""
def __init__(self, likelihood, w,
fix_ckm=False):
"""Initialize the `GlobalLikelihoodPoint` instance.
Parameters:
- likelihood: an instance of `GlobalLikelihood`
- w: an instance of `wilson.Wilson`
- fix_ckm: If False (default), automatically determine the CKM elements
in the presence of new physics in processes used to determine these
elements in the SM. If set to True, the CKM elements are fixed to
their SM values, which can lead to inconsistent results, but also
to a significant speedup in specific cases.
"""
self.likelihood = likelihood
likelihood._check_sm_cov_loaded()
self.w_input = w
self.fix_ckm = fix_ckm
self._w = None
self._obstable_tree_cache = None
self._log_likelihood_dict = None
self._par_dict_np = None
@property
def w(self):
if self._w is None:
w = self.w_input
opt = w.get_option('parameters')
par = self.par_dict_np
for p in ['Vus', 'Vcb', 'Vub', 'delta']:
opt[p] = par[p]
w.set_option('parameters', opt)
self._w = w
return self._w
def get_ckm_np(self):
"""return the values of the four "true" CKM parameters
`Vus`, `Vcb`, `Vub`, `delta`, extracted from the four input observables
for this parameter point in Wilson coefficient space."""
# the default 4-observable scheme
scheme = ckm.CKMSchemeRmuBtaunuBxlnuDeltaM()
try:
Vus, Vcb, Vub, delta = scheme.ckm_np(self.w_input)
except ValueError:
# this happens mostly when the formulas result in |cos(delta)| > 1
raise ValueError("The extraction of CKM elements failed. Too large NP effects?")
return {'Vus': Vus, 'Vcb': Vcb, 'Vub': Vub, 'delta': delta}
@property
def par_dict_np(self):
"""Return the dictionary of parameters where the four CKM parameters
`Vus`, `Vcb`, `Vub`, `delta` have been replaced by their
"true" values as extracted from the four input observables.
Note that if `fix_ckm` is set to `True`, this method actually
returns the SM values."""
if self.fix_ckm:
return self.likelihood.par_dict_sm
if self._par_dict_np is None:
par_dict_np = self.likelihood.par_dict_default.copy()
par_dict_np.update(self.get_ckm_np())
self._par_dict_np = par_dict_np
return self._par_dict_np
def _delta_log_likelihood(self):
"""Compute the delta log likelihood for the individual likelihoods"""
ll = self.likelihood._log_likelihood(self.par_dict_np, self.w)
for name in ll:
ll[name] -= self.likelihood.log_likelihood_sm[name]
ll['global'] = sum([v for k, v in ll.items() if 'custom_' not in k])
return ll
def log_likelihood_dict(self):
"""Return a dictionary with the delta log likelihood values
for the individual contributions.
Cached after the first call."""
if self._log_likelihood_dict is None:
self._log_likelihood_dict = self._delta_log_likelihood()
return self._log_likelihood_dict
def log_likelihood_global(self):
"""Return the value of the global delta log likelihood.
Cached after the first call. Corresponds to the `global` key of
the dictionary returned by `log_likelihood_dict`."""
return self.log_likelihood_dict()['global']
def pvalue_dict(self, n_par=0):
r"""Dictionary of $p$ values of sublikelihoods given the number `n_par`
of free parameters (default 0)."""
nobs = self.likelihood.number_observations_dict()
chi2 = self.chi2_dict()
return {k: pvalue(chi2[k], dof=max(1, nobs[k] - n_par)) for k in chi2}
def chi2_dict(self):
r"""Dictionary of total $\chi^2$ values of each sublikelihood.
        $$\chi^2 = -2\ln L = -2\,(\Delta\ln L + \ln L_\text{SM}),$$
        where $\Delta\ln L$ is the delta log likelihood relative to the SM point.
"""
ll = self.log_likelihood_dict()
llsm = self.likelihood._log_likelihood_sm.copy()
llsm['global'] = sum([v for k, v in llsm.items() if 'custom_' not in k])
return {k: -2 * (ll[k] + llsm[k]) for k in ll}
@property
def _obstable_tree(self):
if not self._obstable_tree_cache:
llh = self.likelihood
info = copy(llh.obstable_sm)
for flh_name, flh in llh.fast_likelihoods.items():
# loop over fast likelihoods: they only have a single "measurement"
m = flh.pseudo_measurement
ml = flh.full_measurement_likelihood
pred = ml.get_predictions_par(self.par_dict_np, self.w)
for i, obs in enumerate(flh.observables):
info[obs]['theory'] = pred[obs]
ll_central = info[obs]['ll_central']
ll_sm = info[obs]['ll_sm']
ll = m.get_logprobability_single(obs, pred[obs])
# DeltaChi2 is -2*DeltaLogLikelihood
info[obs]['pull exp.'] = pull(-2 * (ll - ll_central), dof=1)
s = -1 if ll > ll_sm else 1
info[obs]['pull SM'] = s * pull(-2 * (ll - ll_sm), dof=1)
for lh_name, lh in llh.likelihoods.items():
# loop over "normal" likelihoods
ml = lh.measurement_likelihood
pred = ml.get_predictions_par(self.par_dict_np, self.w)
for i, obs in enumerate(lh.observables):
info[obs]['theory'] = pred[obs]
ll_central = info[obs]['ll_central']
ll_sm = info[obs]['ll_sm']
p_comb = info[obs]['exp. PDF']
ll = p_comb.logpdf([pred[obs]])
info[obs]['pull exp.'] = pull(-2 * (ll - ll_central), dof=1)
s = -1 if ll > ll_sm else 1
info[obs]['pull SM'] = s * pull(-2 * (ll - ll_sm), dof=1)
self._obstable_tree_cache = info
return self._obstable_tree_cache
def obstable(self, min_pull_exp=0, sort_by='pull exp.', ascending=None,
min_val=None, max_val=None):
r"""Return a pandas data frame with the central values and uncertainties
as well as the pulls with respect to the experimental and the SM values for each observable.
        The pull is defined as $\sqrt{|-2\,\Delta\ln L|}$. Note that the global
likelihood is *not* simply proportional to the sum of squared pulls
due to correlations.
"""
sort_keys = ['name', 'exp. unc.', 'experiment', 'pull SM', 'pull exp.',
'th. unc.', 'theory']
if sort_by not in sort_keys:
raise ValueError(
"'{}' is not an allowed value for sort_by. Allowed values are "
"'{}', and '{}'.".format(sort_by, "', '".join(sort_keys[:-1]),
sort_keys[-1])
)
info = self._obstable_tree
subset = None
if sort_by == 'pull exp.':
# if sorted by pull exp., use descending order as default
if ascending is None:
ascending = False
if min_val is not None:
min_val = max(min_pull_exp, min_val)
else:
min_val = min_pull_exp
elif min_pull_exp != 0:
subset = lambda row: row['pull exp.'] >= min_pull_exp
# if sorted not by pull exp., use ascending order as default
if ascending is None:
ascending = True
info = self._obstable_filter_sort(info, sortkey=sort_by,
ascending=ascending,
min_val=min_val, max_val=max_val,
subset=subset)
# create DataFrame
df = pd.DataFrame(info).T
# if df has length 0 (e.g. if min_pull is very large) there are no
# columns that could be removed
if len(df) >0:
# remove columns that are only used internal and should not be
# included in obstable
del(df['inspire'])
del(df['lh_name'])
del(df['name'])
del(df['exp. PDF'])
del(df['ll_central'])
del(df['ll_sm'])
return df
@staticmethod
def _obstable_filter_sort(info, sortkey='name', ascending=True,
min_val=None, max_val=None,
subset=None, max_rows=None):
# impose min_val and max_val
if min_val is not None:
info = {obs:row for obs,row in info.items()
if row[sortkey] >= min_val}
if max_val is not None:
info = {obs:row for obs,row in info.items()
if row[sortkey] <= max_val}
# get only subset:
if subset is not None:
info = {obs:row for obs,row in info.items() if subset(row)}
# sort
info = OrderedDict(sorted(info.items(), key=lambda x: x[1][sortkey],
reverse=(not ascending)))
# restrict number of rows per tabular to max_rows
if max_rows is None or len(info)<=max_rows:
return info
else:
info_list = []
for n in range(ceil(len(info)/max_rows)):
info_n = OrderedDict((obs,row)
for i,(obs,row) in enumerate(info.items())
if i>=n*max_rows and i<(n+1)*max_rows)
info_list.append(info_n)
return info_list
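# Illustrative sketch (not part of the original smelli code): evaluating the
# likelihood at a new-physics point. The Wilson coefficient name
# ('lq3_2223'), its value and the 1 TeV input scale are placeholders chosen
# for illustration only.
def _example_likelihood_point():
    gl = GlobalLikelihood(eft='SMEFT', basis='Warsaw')
    glp = gl.parameter_point({'lq3_2223': 1e-9}, scale=1000)
    delta_ll = glp.log_likelihood_global()  # global delta log L vs. the SM
    table = glp.obstable(min_pull_exp=1)    # observables with exp. pull >= 1 sigma
    return delta_ll, table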
|
the-stack_0_11809 | import pexpect
import argparse
import os
import os.path
import subprocess
import sys
class RepositorySet:
def __init__(self, repository_root, repositories):
self.repository_root = repository_root
self.repositories = repositories
class Repository:
def __init__(self, name, origin_url, remote_urls):
self.name = name
self.origin_url = origin_url
self.remote_urls = remote_urls
class Credentials:
def __init__(self, username, password):
self.username = username
self.password = password
class Command:
def __init__(self, name, repository_root, username='', password=''):
self.name = name
self.repository_root = repository_root
self.credentials = Credentials(username, password)
def is_list(self):
return self.name == 'list'
def is_update_local(self):
return self.name == 'update-local'
def is_update_remote(self):
return self.name == 'update-remote'
def is_git_repo(name):
old_working_dir = os.getcwd()
ret = None
if os.path.isdir(name):
os.chdir(name)
result = subprocess.run(
['git', 'rev-parse', '--is-inside-work-tree'],
capture_output=True,
text=True
)
if result.returncode == 0:
ret = True
else:
ret = False
else:
ret = False
os.chdir(old_working_dir)
assert ret is not None
return ret
def get_url_by_label(label):
result = subprocess.run(
['git', 'remote', 'get-url', label],
capture_output=True,
text=True
)
if result.returncode == 0:
return result.stdout.rstrip()
else:
raise ValueError(f'The git repository does not have a URL named {label}')
def remote_url_labels():
result = subprocess.run(
['git', 'remote'],
capture_output=True,
text=True
)
remote_labels = result.stdout.rstrip().split('\n')
return remote_labels
def remote_urls(labels, exclude=['origin']):
urls = {}
for label in (label for label in labels if label not in exclude):
try:
url = get_url_by_label(label)
except ValueError:
url = ''
urls[label] = url
return urls
def scan_repository_root(repository_root):
repositories = {}
old_working_dir = os.getcwd()
os.chdir(repository_root)
for name in (name for name in os.listdir(repository_root) if is_git_repo(name)):
os.chdir(name)
try:
origin_url = get_url_by_label('origin')
except ValueError:
origin_url = ''
labels = remote_url_labels()
urls = remote_urls(labels)
repository = Repository(name, origin_url, urls)
repositories[name] = repository
os.chdir(os.path.pardir)
os.chdir(old_working_dir)
return RepositorySet(repository_root, repositories)
def run_list(repository_set):
print(f'Found {len(repository_set.repositories)} Git repositories in `{repository_set.repository_root}`\n')
for repo in repository_set.repositories.values():
repo_path = os.path.join(repository_set.repository_root, repo.name)
print(f'Repository: {repo_path}')
print(f'Origin: {repo.origin_url}')
print(f'Remote URLs: {repo.remote_urls}\n')
def run_update_local(repository_set):
def git_pull():
return subprocess.run(
['git', 'pull', 'origin'],
capture_output=True,
text=True
)
old_working_dir = os.getcwd()
os.chdir(repository_set.repository_root)
for repository in repository_set.repositories.values():
os.chdir(repository.name)
result = git_pull()
if result.returncode == 0:
print(f'The repository `{repository.name}` has been updated successfully.')
else:
print(f'An error occurred in updating the repository `{repository.name}`')
print(f'{result.stderr}')
print(f'{result.stdout}')
os.chdir(os.path.pardir)
os.chdir(old_working_dir)
def run_update_remote(repository_set, credentials):
def git_push(label):
return pexpect.run(
f'git push {label} --all',
withexitstatus=1,
events={
'(?i)Username for': f'{credentials.username}\n',
'(?i)Password for': f'{credentials.password}\n'
}
)
old_working_dir = os.getcwd()
os.chdir(repository_set.repository_root)
for repository in repository_set.repositories.values():
os.chdir(repository.name)
for label, remote_url in repository.remote_urls.items():
command_output, exit_status = git_push(label)
if exit_status == 0:
print(
f'The remote copy of repository of `{repository.name}` with '
f'the name `{label}` and the URL `{remote_url}` has been '
f'updated successfully.'
)
else:
print(
f'An error occurred in updating the remote copy of the '
f'repository `{repository.name}` to the URL named `{label}` at URL `{remote_url}`.'
)
print(command_output)
os.chdir(os.path.pardir)
os.chdir(old_working_dir)
def run_command(command, repository_set):
if command.is_list():
run_list(repository_set)
elif command.is_update_local():
run_update_local(repository_set)
elif command.is_update_remote():
run_update_remote(repository_set, command.credentials)
else:
raise ValueError(f'The command name `{command.name}` is not a valid command.')
def arg_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
title='subcommands',
description='valid subcommands',
help='subcommand help'
)
# Subparser for the list command.
parser_list = subparsers.add_parser(
'list',
help='Search a directory for git repositories'
)
parser_list.add_argument(
'path', type=str,
help='The path to the git repository directory'
)
# Subparser for the update-local command.
parser_update_local = subparsers.add_parser(
'update-local',
help='Update the local copies of each git repository'
)
parser_update_local.add_argument(
'path', type=str,
help='The path to the git repository directory'
)
# Subparser for the update-remote command.
parser_update_remote = subparsers.add_parser(
'update-remote',
help='Update the remote copies of each git repository'
)
parser_update_remote.add_argument(
'-u', '--username',
help='Username for remote git repositories'
)
parser_update_remote.add_argument(
'-p', '--password',
help='Password or personal access token for remote git repositories'
)
parser_update_remote.add_argument(
'path', type=str,
help='The path to the git repository directory'
)
return parser
def parse_args(args):
command_args = arg_parser().parse_args(args[1:])
if args[1] == 'list':
path = command_args.path
return Command(args[1], path)
elif args[1] == 'update-local':
path = command_args.path
return Command(args[1], path)
elif args[1] == 'update-remote':
username = command_args.username
password = command_args.password
path = command_args.path
return Command(args[1], path, username, password)
else:
raise ValueError(f'The argument `{args[1]}` is not a valid command name.')
def usage():
return ''.join((
'USAGE:\n',
'List the git repositories in a directory\n',
'`upfork list /path/to/git/repository/forks/`\n',
'Update the local copies of the git repositories in a directory\n',
'`upfork update-local /path/to/git/repository/forks/`\n',
'Update the remote copies of the git repositories in a directory\n',
'`upfork update-remote /path/to/git/repository/forks/`\n'
))
def main():
if len(sys.argv) < 3:
sys.exit(usage())
try:
command = parse_args(sys.argv)
except:
sys.exit(usage())
if not os.path.exists(command.repository_root):
sys.exit(f'Path does not exist: {command.repository_root}')
repository_set = scan_repository_root(command.repository_root)
run_command(command, repository_set)
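# Entry-point guard (assumed intent; the usage() text above refers to the tool
# as `upfork`, implying direct invocation of this script).
if __name__ == '__main__':
    main()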
|
the-stack_0_11810 | #!/usr/bin/env python3
# Copyright 2018 The SwiftShader Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import run
import argparse
import multiprocessing
import os
import re
import shutil
LLVM_DIR = os.path.abspath(os.path.join('..', 'llvm'))
LLVM_CONFIGS = os.path.abspath(os.path.join('..', 'configs'))
LLVM_OBJS = os.path.join(os.getcwd(), 'llvm_objs')
LLVM_TARGETS = [
('AArch64', ('__aarch64__',)),
('ARM', ('__arm__',)),
('X86', ('__i386__', '__x86_64__')),
('Mips', ('__mips__',)),
]
LLVM_TRIPLES = {
'android': [
('__x86_64__', 'x86_64-linux-android'),
('__i386__', 'i686-linux-android'),
('__arm__', 'armv7-linux-androideabi'),
('__aarch64__', 'aarch64-linux-android'),
],
'linux': [
('__x86_64__', 'x86_64-unknown-linux-gnu'),
('__i386__', 'i686-pc-linux-gnu'),
('__arm__', 'armv7-linux-gnueabihf'),
('__aarch64__', 'aarch64-linux-gnu'),
('__mips__', 'mipsel-linux-gnu'),
('__mips64', 'mips64el-linux-gnuabi64'),
],
'darwin': [
('__x86_64__', 'x86_64-apple-darwin'),
],
'windows': [
('__x86_64__', 'x86_64-pc-win32'),
('__i386__', 'i686-pc-win32'),
('__arm__', 'armv7-pc-win32'),
('__aarch64__', 'aarch64-pc-win32'),
('__mips__', 'mipsel-pc-win32'),
('__mips64', 'mips64el-pc-win32'),
],
}
LLVM_OPTIONS = [
'-DCMAKE_BUILD_TYPE=Release',
'-DLLVM_TARGETS_TO_BUILD=' + ';'.join(t[0] for t in LLVM_TARGETS),
'-DLLVM_ENABLE_THREADS=OFF',
'-DLLVM_ENABLE_TERMINFO=OFF',
'-DLLVM_ENABLE_LIBXML2=OFF',
'-DLLVM_ENABLE_LIBEDIT=OFF',
'-DLLVM_ENABLE_LIBPFM=OFF',
'-DLLVM_ENABLE_ZLIB=OFF',
]
def _parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('name', help='destination name',
choices=['android', 'linux', 'darwin', 'windows'])
parser.add_argument('-j', '--jobs', help='parallel compilation', type=int)
return parser.parse_args()
def build_llvm(name, num_jobs):
"""Build LLVM and get all generated files."""
if num_jobs is None:
num_jobs = multiprocessing.cpu_count()
"""On Windows we need to have CMake generate build files for the 64-bit
Visual Studio host toolchain."""
host = '-Thost=x64' if name is 'windows' else ''
os.makedirs(LLVM_OBJS, exist_ok=True)
run(['cmake', host, LLVM_DIR] + LLVM_OPTIONS, cwd=LLVM_OBJS)
run(['cmake', '--build', '.', '-j', str(num_jobs)], cwd=LLVM_OBJS)
def list_files(src_base, src, dst_base, suffixes):
"""Enumerate the files that are under `src` and end with one of the
`suffixes` and yield the source path and the destination path."""
src_base = os.path.abspath(src_base)
src = os.path.join(src_base, src)
for base_dir, dirnames, filenames in os.walk(src):
for filename in filenames:
if os.path.splitext(filename)[1] in suffixes:
relative = os.path.relpath(base_dir, src_base)
yield (os.path.join(base_dir, filename),
os.path.join(dst_base, relative, filename))
def copy_common_generated_files(dst_base):
"""Copy platform-independent generated files."""
suffixes = {'.inc', '.h', '.def'}
subdirs = [
os.path.join('include', 'llvm', 'IR'),
os.path.join('include', 'llvm', 'Support'),
os.path.join('lib', 'IR'),
os.path.join('lib', 'Target', 'AArch64'),
os.path.join('lib', 'Target', 'ARM'),
os.path.join('lib', 'Target', 'X86'),
os.path.join('lib', 'Target', 'Mips'),
os.path.join('lib', 'Transforms', 'InstCombine'),
]
for subdir in subdirs:
for src, dst in list_files(LLVM_OBJS, subdir, dst_base, suffixes):
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
def copy_platform_file(platform, src, dst):
"""Copy platform-dependent generated files and add platform-specific
modifications."""
# LLVM configuration patterns to be post-processed.
llvm_target_pattern = re.compile('^LLVM_[A-Z_]+\\(([A-Za-z0-9_]+)\\)$')
llvm_native_pattern = re.compile(
'^#define LLVM_NATIVE_([A-Z]+) (LLVMInitialize)?(.*)$')
llvm_triple_pattern = re.compile('^#define (LLVM_[A-Z_]+_TRIPLE) "(.*)"$')
llvm_define_pattern = re.compile('^#define ([A-Za-z0-9_]+) (.*)$')
# LLVM configurations to be undefined.
undef_names = [
'BACKTRACE_HEADER',
'ENABLE_BACKTRACES',
'ENABLE_CRASH_OVERRIDES',
'HAVE_BACKTRACE',
'HAVE_POSIX_SPAWN',
'HAVE_PTHREAD_GETNAME_NP',
'HAVE_PTHREAD_SETNAME_NP',
'HAVE_TERMIOS_H',
'HAVE_ZLIB_H',
'HAVE__UNWIND_BACKTRACE',
]
# Build architecture-specific conditions.
conds = {}
for arch, defs in LLVM_TARGETS:
conds[arch] = ' || '.join('defined(' + v + ')' for v in defs)
# Get a set of platform-specific triples.
triples = LLVM_TRIPLES[platform]
with open(src, 'r') as src_file:
os.makedirs(os.path.dirname(dst), exist_ok=True)
with open(dst, 'w') as dst_file:
for line in src_file:
if line == '#define LLVM_CONFIG_H\n':
print(line, file=dst_file, end='')
print('', file=dst_file)
print('#if !defined(__i386__) && defined(_M_IX86)', file=dst_file)
print('#define __i386__ 1', file=dst_file)
print('#endif', file=dst_file)
print('', file=dst_file)
print('#if !defined(__x86_64__) && (defined(_M_AMD64) || defined (_M_X64))', file=dst_file)
print('#define __x86_64__ 1', file=dst_file)
print('#endif', file=dst_file)
print('', file=dst_file)
match = llvm_target_pattern.match(line)
if match:
arch = match.group(1)
print('#if ' + conds[arch], file=dst_file)
print(line, file=dst_file, end='')
print('#endif', file=dst_file)
continue
match = llvm_native_pattern.match(line)
if match:
name = match.group(1)
init = match.group(2) or ''
arch = match.group(3)
end = ''
if arch.lower().endswith(name.lower()):
end = arch[-len(name):]
directive = '#if '
for arch, defs in LLVM_TARGETS:
print(directive + conds[arch], file=dst_file)
print('#define LLVM_NATIVE_' + name + ' ' +
init + arch + end, file=dst_file)
directive = '#elif '
print('#else', file=dst_file)
print('#error "unknown architecture"', file=dst_file)
print('#endif', file=dst_file)
continue
match = llvm_triple_pattern.match(line)
if match:
name = match.group(1)
directive = '#if'
for defs, triple in triples:
print(directive + ' defined(' + defs + ')',
file=dst_file)
print('#define ' + name + ' "' + triple + '"',
file=dst_file)
directive = '#elif'
print('#else', file=dst_file)
print('#error "unknown architecture"', file=dst_file)
print('#endif', file=dst_file)
continue
match = llvm_define_pattern.match(line)
if match and match.group(1) in undef_names:
print('/* #undef ' + match.group(1) + ' */', file=dst_file)
continue
print(line, file=dst_file, end='')
def copy_platform_generated_files(platform, dst_base):
"""Copy platform-specific generated files."""
suffixes = {'.inc', '.h', '.def'}
src_dir = os.path.join('include', 'llvm', 'Config')
for src, dst in list_files(LLVM_OBJS, src_dir, dst_base, suffixes):
copy_platform_file(platform, src, dst)
def main():
args = _parse_args()
build_llvm(args.name, args.jobs)
copy_common_generated_files(os.path.join(LLVM_CONFIGS, 'common'))
copy_platform_generated_files(
args.name, os.path.join(LLVM_CONFIGS, args.name))
if __name__ == '__main__':
main()
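# Illustrative invocations (assuming the script is run from the directory that
# contains it, so the relative '../llvm' and '../configs' paths resolve;
# '<this_script>.py' is a placeholder for this file's actual name):
#   python3 <this_script>.py linux -j 8
#   python3 <this_script>.py windows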
|
the-stack_0_11811 | """Alquiler (rental) serializers."""
#Django REST Framework
from rest_framework import serializers
#Model
from maquinaria.alquileres.models import Alquiler
from maquinaria.maquinas.models import Maquina
class AlquilerModelSerializer(serializers.ModelSerializer):
"""Modelo Serializer de Cliente"""
class Meta:
"""Clase Meta"""
model = Alquiler
fields = (
'id', 'cliente',
'maquina', 'fecha_inicio',
'fecha_final', 'precio_alquiler'
)
class Update(serializers.Serializer):
    """Serializer whose save() sets estado=False on the Maquina with id=1."""

    def save(self):
        # The target machine id is hard-coded to 1.
        maquina = Maquina.objects.get(id=1)
        maquina.estado = False
        maquina.save()
|
the-stack_0_11812 |
from PIL import Image
# from PIL import GifImagePlugin
import cv2
import numpy as np
import os
#root_dir = os.path.dirname('/Users/apple/Desktop/414project/')
input_video = Image.open("./walking.gif")
frame_length = input_video.n_frames
def track_position_per_frame(f_num):
image = cv2.imread('./walking_frames/frame{}.png'.format(f_num), cv2.IMREAD_UNCHANGED)
#make mask of where the transparent bits are
bg_mask = image[:,:,3] == 0
fg_mask = image[:,:,3] != 0
#replace areas of transparency with black and not transparent with white
image[bg_mask] = [0, 0, 0, 0] #black
image[fg_mask] = [255,255,255, 255] #white
#new image without alpha channel...
img = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
white_ = [255,255,255]
#zipped = np.argwhere(img == white_)[0]
zipped_coordinates = np.argwhere(np.all(img == white_,axis =2))
#current_position['YX'] = zipped_coordinates[0]
current_position = (zipped_coordinates[-1].tolist())
current_position = [i * 3 for i in current_position]
current_position[0],current_position[1] = current_position[1],current_position[0]
return current_position
# remains to be changed
def draw_points(img, current_position):
# img = cv2.circle(img, (top_X,top_Y), radius=4, color=(0, 0, 255), thickness=-1)
new_img =cv2.circle(img, (current_position[1],current_position[0]), radius=4, color=(0, 0, 255), thickness=-1)
cv2.imshow('foot position',new_img)
cv2.waitKey()
def main(frame_length):
input_video = Image.open("./walking.gif")
frame_length = input_video.n_frames
motion_trail = []
cut_frames = list(range(0, frame_length, 15))
for i in cut_frames:
input_video.seek(i)
input_video.save('./walking_frames/frame{}.png'.format(i))
motion_trail.append(track_position_per_frame(i))
print(motion_trail)
with open("2d_coordinates.txt", 'w') as file:
for row in motion_trail:
s = " ".join(map(str, row))
file.write(s+'\n')
if __name__ == '__main__':
main(frame_length)
|
the-stack_0_11814 | # coding: utf-8
from __future__ import absolute_import
import pytest
try:
import vtk
except:
vtk = None
from six import string_types
from panel.models.vtk import VTKPlot
from panel.pane import Pane, PaneBase, VTK
vtk_available = pytest.mark.skipif(vtk is None, reason="requires vtk")
def make_render_window():
cone = vtk.vtkConeSource()
coneMapper = vtk.vtkPolyDataMapper()
coneMapper.SetInputConnection(cone.GetOutputPort())
coneActor = vtk.vtkActor()
coneActor.SetMapper(coneMapper)
ren = vtk.vtkRenderer()
ren.AddActor(coneActor)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
return renWin
def test_get_vtk_pane_type_from_url():
url = r'https://raw.githubusercontent.com/Kitware/vtk-js/master/Data/StanfordDragon.vtkjs'
assert PaneBase.get_pane_type(url) is VTK
def test_get_vtk_pane_type_from_file():
file = r'StanfordDragon.vtkjs'
assert PaneBase.get_pane_type(file) is VTK
@vtk_available
def test_get_vtk_pane_type_from_render_window():
renWin = make_render_window()
assert PaneBase.get_pane_type(renWin) is VTK
def test_vtk_pane_from_url(document, comm):
url = r'https://raw.githubusercontent.com/Kitware/vtk-js/master/Data/StanfordDragon.vtkjs'
pane = Pane(url)
# Create pane
model = pane.get_root(document, comm=comm)
assert isinstance(model, VTKPlot)
assert pane._models[model.ref['id']][0] is model
assert isinstance(model.data, string_types)
@vtk_available
def test_vtk_data_array_dump():
from panel.pane.vtk.vtkjs_serializer import _dump_data_array
root_keys = ['ref', 'vtkClass', 'name', 'dataType',
'numberOfComponents', 'size', 'ranges']
renWin = make_render_window()
renderers = list(renWin.GetRenderers())
ren_props = list(renderers[0].GetViewProps())
mapper = ren_props[0].GetMapper()
mapper.Update() # create data
data = mapper.GetInput().GetPoints().GetData()
scDir = []
root = _dump_data_array(scDir, '', 'test', data)
assert len(set(root_keys) - set(root.keys())) == 0
assert len(scDir) == 1
assert isinstance(scDir[0][0], string_types)
assert isinstance(scDir[0][1], bytes)
|
the-stack_0_11815 | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from blenderneuron.activity import Activity
class RootGroup:
__metaclass__ = ABCMeta
def __init__(self):
self.name = ""
self.roots = OrderedDict()
self.import_synapses = False
self.interaction_granularity = 'Cell'
self.record_activity = False
self.recording_granularity = 'Cell'
self.record_variable = "v"
self.recording_period = 1.0
self.recording_time_start = 0
self.recording_time_end = 0
self.activity = Activity()
def __str__(self):
return self.name
def clear_activity(self):
# Clear group level activity
self.activity.clear()
# Cell and section level activity
for root in self.roots.values():
root.clear_activity(recursive=True)
# Segment level
for root in self.roots.values():
root.clear_3d_segment_activity()
def to_dict(self,
include_activity=False,
include_root_children=False,
include_coords_and_radii=False):
"""
        :param include_activity: whether to include recorded activity
        :param include_root_children: whether to recursively include child sections
        :param include_coords_and_radii: whether to include 3D point coordinates and radii
        :return: a dictionary representation of the group
"""
result = {
"name": self.name,
"roots": [root.to_dict(include_activity, include_root_children, include_coords_and_radii) for root in self.roots.values()],
"import_synapses": self.import_synapses,
"interaction_granularity": self.interaction_granularity,
"record_activity": self.record_activity,
"recording_granularity": self.recording_granularity,
"record_variable": self.record_variable,
"recording_period": self.recording_period,
"recording_time_start": self.recording_time_start,
"recording_time_end": self.recording_time_end,
}
if include_activity:
result.update({
"activity": self.activity.to_dict(),
})
return result
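# Illustrative sketch (not part of the original blenderneuron code): RootGroup
# uses ABCMeta and is meant to be specialized elsewhere in the package. The
# hypothetical subclass below only shows the minimal shape of such a
# specialization; `root` objects are assumed to expose .name plus the
# to_dict()/clear_activity() interface used above.
class ExampleRootGroup(RootGroup):
    def __init__(self, name):
        super(ExampleRootGroup, self).__init__()
        self.name = name

    def add_root(self, root):
        self.roots[root.name] = root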
|
the-stack_0_11819 | #!/usr/bin/env python
# coding: utf-8
import logging
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from lightautoml.automl.presets.tabular_presets import TabularAutoML
from lightautoml.dataset.roles import DatetimeRole
from lightautoml.tasks import Task
def test_tabular_automl_preset():
np.random.seed(42)
logging.basicConfig(format='[%(asctime)s] (%(levelname)s): %(message)s', level=logging.DEBUG)
data = pd.read_csv('../example_data/test_data_files/sampled_app_train.csv')
data['BIRTH_DATE'] = (np.datetime64('2018-01-01') + data['DAYS_BIRTH'].astype(np.dtype('timedelta64[D]'))).astype(str)
data['EMP_DATE'] = (np.datetime64('2018-01-01') + np.clip(data['DAYS_EMPLOYED'], None, 0).astype(np.dtype('timedelta64[D]'))
).astype(str)
data['report_dt'] = np.datetime64('2018-01-01')
data['constant'] = 1
data['allnan'] = np.nan
data.drop(['DAYS_BIRTH', 'DAYS_EMPLOYED'], axis=1, inplace=True)
train, test = train_test_split(data, test_size=2000, random_state=42)
roles = {'target': 'TARGET',
DatetimeRole(base_date=True, seasonality=(), base_feats=False): 'report_dt',
}
task = Task('binary', )
automl = TabularAutoML(task=task, timeout=600, general_params={
'use_algos': [['linear_l2', 'lgb', ], ['linear_l2', 'lgb']],
'nested_cv': True,
'skip_conn': True,
}, nested_cv_params={
'cv': 5,
'n_folds': None
})
oof_pred = automl.fit_predict(train, roles=roles)
test_pred = automl.predict(test)
not_nan = np.any(~np.isnan(oof_pred.data), axis=1)
logging.debug('Check scores...')
print('OOF score: {}'.format(roc_auc_score(train[roles['target']].values[not_nan], oof_pred.data[not_nan][:, 0])))
print('TEST score: {}'.format(roc_auc_score(test[roles['target']].values, test_pred.data[:, 0])))
logging.debug('Pickle automl')
with open('automl.pickle', 'wb') as f:
pickle.dump(automl, f)
logging.debug('Load pickled automl')
with open('automl.pickle', 'rb') as f:
automl = pickle.load(f)
logging.debug('Predict loaded automl')
test_pred = automl.predict(test)
logging.debug('TEST score, loaded: {}'.format(roc_auc_score(test['TARGET'].values, test_pred.data[:, 0])))
os.remove('automl.pickle')
|
the-stack_0_11821 | import warnings
from geopy.compat import urlencode
from geopy.exc import GeocoderParseError, GeocoderServiceError
from geopy.geocoders.base import DEFAULT_SENTINEL, Geocoder
from geopy.location import Location
from geopy.util import logger
__all__ = ("Yandex", )
class Yandex(Geocoder):
"""Yandex geocoder.
Documentation at:
https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/
.. versionadded:: 1.5.0
"""
api_path = '/1.x/'
def __init__(
self,
api_key=None,
lang=None,
timeout=DEFAULT_SENTINEL,
proxies=DEFAULT_SENTINEL,
user_agent=None,
scheme=None,
format_string=None,
ssl_context=DEFAULT_SENTINEL,
):
"""
.. versionchanged:: 1.14.0
Default scheme has been changed from ``http`` to ``https``.
:param str api_key: Yandex API key (not obligatory)
https://tech.yandex.ru/maps/keys/get/
:param str lang: response locale, the following locales are
supported: ``"ru_RU"`` (default), ``"uk_UA"``, ``"be_BY"``,
``"en_US"``, ``"tr_TR"``.
:param int timeout:
See :attr:`geopy.geocoders.options.default_timeout`.
:param dict proxies:
See :attr:`geopy.geocoders.options.default_proxies`.
:param str user_agent:
See :attr:`geopy.geocoders.options.default_user_agent`.
.. versionadded:: 1.12.0
:param str scheme:
See :attr:`geopy.geocoders.options.default_scheme`.
.. versionadded:: 1.14.0
:param str format_string:
See :attr:`geopy.geocoders.options.default_format_string`.
.. versionadded:: 1.14.0
:type ssl_context: :class:`ssl.SSLContext`
:param ssl_context:
See :attr:`geopy.geocoders.options.default_ssl_context`.
.. versionadded:: 1.14.0
"""
super(Yandex, self).__init__(
format_string=format_string,
scheme=scheme,
timeout=timeout,
proxies=proxies,
user_agent=user_agent,
ssl_context=ssl_context,
)
self.api_key = api_key
self.lang = lang
domain = 'geocode-maps.yandex.ru'
self.api = '%s://%s%s' % (self.scheme, domain, self.api_path)
def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):
"""
Return a location point by address.
:param str query: The address or query you wish to geocode.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
params = {
'geocode': self.format_string % query,
'format': 'json'
}
if self.api_key:
params['apikey'] = self.api_key
if self.lang:
params['lang'] = self.lang
if exactly_one:
params['results'] = 1
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one,
)
def reverse(
self,
query,
exactly_one=DEFAULT_SENTINEL,
timeout=DEFAULT_SENTINEL,
kind=None,
):
"""
Return an address by location point.
:param query: The coordinates for which you wish to obtain the
closest human-readable addresses.
:type query: :class:`geopy.point.Point`, list or tuple of ``(latitude,
longitude)``, or string as ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
.. versionchanged:: 1.14.0
Default value for ``exactly_one`` was ``False``, which differs
from the conventional default across geopy. Please always pass
this argument explicitly, otherwise you would get a warning.
In geopy 2.0 the default value will become ``True``.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:param str kind: Type of toponym. Allowed values: `house`, `street`, `metro`,
`district`, `locality`.
.. versionadded:: 1.14.0
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if exactly_one is DEFAULT_SENTINEL:
warnings.warn('%s.reverse: default value for `exactly_one` '
'argument will become True in geopy 2.0. '
'Specify `exactly_one=False` as the argument '
'explicitly to get rid of this warning.' % type(self).__name__,
DeprecationWarning, stacklevel=2)
exactly_one = False
try:
point = self._coerce_point_to_string(query, "%(lon)s,%(lat)s")
except ValueError:
raise ValueError("Must be a coordinate pair or Point")
params = {
'geocode': point,
'format': 'json'
}
if self.api_key:
params['apikey'] = self.api_key
if self.lang:
params['lang'] = self.lang
if kind:
params['kind'] = kind
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.reverse: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
)
def _parse_json(self, doc, exactly_one):
"""
Parse JSON response body.
"""
if doc.get('error'):
raise GeocoderServiceError(doc['error']['message'])
try:
places = doc['response']['GeoObjectCollection']['featureMember']
except KeyError:
raise GeocoderParseError('Failed to parse server response')
def parse_code(place):
"""
Parse each record.
"""
try:
place = place['GeoObject']
except KeyError:
raise GeocoderParseError('Failed to parse server response')
longitude, latitude = [
float(_) for _ in place['Point']['pos'].split(' ')
]
name_elements = ['name', 'description']
location = ', '.join([place[k] for k in name_elements if place.get(k)])
return Location(location, (latitude, longitude), place)
if exactly_one:
try:
return parse_code(places[0])
except IndexError:
return None
else:
return [parse_code(place) for place in places]
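# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how this geocoder might be driven; it assumes a
# geopy 1.x environment, network access, and a real key in place of the
# 'YOUR_API_KEY' placeholder below.
if __name__ == '__main__':
    geocoder = Yandex(api_key='YOUR_API_KEY', lang='en_US')
    location = geocoder.geocode('Red Square, Moscow')
    if location is not None:
        print(location.address, location.latitude, location.longitude)
    # Pass exactly_one explicitly to avoid the deprecation warning on reverse().
    results = geocoder.reverse('55.7539, 37.6208', exactly_one=False, kind='house')
    for result in results or []:
        print(result.address)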
|
the-stack_0_11828 | from setuptools import setup
from setuptools import find_packages
NAME = "torbjorn"
AUTHOR = "Ailln"
EMAIL = "[email protected]"
URL = "https://github.com/Ailln/torbjorn"
LICENSE = "MIT License"
DESCRIPTION = "Provide some practical Python decorators."
if __name__ == "__main__":
setup(
name=NAME,
version="0.0.4",
author=AUTHOR,
author_email=EMAIL,
url=URL,
license=LICENSE,
description=DESCRIPTION,
packages=find_packages(),
include_package_data=True,
install_requires=open("./requirements.txt", "r").read().splitlines(),
long_description=open("./README.md", "r").read(),
long_description_content_type='text/markdown',
entry_points={
"console_scripts": [
"torbjorn=torbjorn.shell:run"
]
},
zip_safe=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
]
)
|
the-stack_0_11831 | # -*- coding: utf-8-*-
from __future__ import absolute_import
import atexit
from .plugins import Email
from apscheduler.schedulers.background import BackgroundScheduler
import logging
from . import app_utils
import time
import sys
if sys.version_info < (3, 0):
import Queue as queue # Python 2
else:
import queue # Python 3
class Notifier(object):
class NotificationClient(object):
def __init__(self, gather, timestamp):
self.gather = gather
self.timestamp = timestamp
def run(self):
self.timestamp = self.gather(self.timestamp)
def __init__(self, profile, brain):
self._logger = logging.getLogger(__name__)
self.q = queue.Queue()
self.profile = profile
self.notifiers = []
self.brain = brain
if 'email' in profile and \
('enable' not in profile['email'] or profile['email']['enable']):
self.notifiers.append(self.NotificationClient(
self.handleEmailNotifications, None))
else:
self._logger.debug('email account not set ' +
'in profile, email notifier will not be used')
if 'robot' in profile and profile['robot'] == 'emotibot':
self.notifiers.append(self.NotificationClient(
self.handleRemenderNotifications, None))
sched = BackgroundScheduler(daemon=True)
sched.start()
sched.add_job(self.gather, 'interval', seconds=120)
atexit.register(lambda: sched.shutdown(wait=False))
def gather(self):
[client.run() for client in self.notifiers]
def handleEmailNotifications(self, lastDate):
"""Places new email notifications in the Notifier's queue."""
emails = Email.fetchUnreadEmails(self.profile, since=lastDate)
if emails is None:
return
if emails:
lastDate = Email.getMostRecentDate(emails)
def styleEmail(e):
subject = Email.getSubject(e, self.profile)
if Email.isEchoEmail(e, self.profile):
if Email.isNewEmail(e):
return subject.replace('[echo]', '')
else:
return ""
elif Email.isControlEmail(e, self.profile):
self.brain.query([subject.replace('[control]', '')
.strip()], None, True)
return ""
sender = Email.getSender(e)
return "您有来自 %s 的新邮件 %s" % (sender, subject)
for e in emails:
self.q.put(styleEmail(e))
return lastDate
def handleRemenderNotifications(self, lastDate):
lastDate = time.strftime('%d %b %Y %H:%M:%S')
due_reminders = app_utils.get_due_reminders()
for reminder in due_reminders:
self.q.put(reminder)
return lastDate
def getNotification(self):
"""Returns a notification. Note that this function is consuming."""
try:
notif = self.q.get(block=False)
return notif
except queue.Empty:
return None
def getAllNotifications(self):
"""
Return a list of notifications in chronological order.
Note that this function is consuming, so consecutive calls
will yield different results.
"""
notifs = []
notif = self.getNotification()
while notif:
notifs.append(notif)
notif = self.getNotification()
return notifs
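# --- Hedged usage sketch (not part of the original module) ---
# Because this module uses relative imports it is meant to run inside the
# package, so the example lives in a helper rather than a __main__ guard; the
# minimal profile and fake brain below are assumptions for illustration only.
def _demo_notifier():
    class _FakeBrain(object):
        def query(self, texts, mic, background):
            print('brain received: %s' % texts)
    notifier = Notifier({'robot': 'emotibot'}, _FakeBrain())
    notifier.q.put('manual test notification')
    for message in notifier.getAllNotifications():
        print(message)
    return notifier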
|
the-stack_0_11832 | import numpy as np
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
class Ice(WorldObj):
def __init__(self):
super().__init__('ice', 'blue')
def can_overlap(self):
return True
def render(self, img):
c = (119, 201, 240) # Pale blue
# Background color
fill_coords(img, point_in_rect(0, 1, 0, 1), c)
# Add Ice to the object index.
OBJECT_TO_IDX['ice'] = max(OBJECT_TO_IDX.values()) + 1
class IceGridEnv(MiniGridEnv):
def __init__(self, size):
super().__init__(
grid_size=size,
max_steps=4*size*size,
see_through_walls=False,
seed=None
)
def _gen_grid(self, width, height):
assert width >= 5 and height >= 5
self.grid = Grid(width, height)
# Surrounding walls.
self.grid.wall_rect(0, 0, width, height)
# Sample ice patches.
# Chose top left corner.
n_patches = 1
while n_patches > 0:
patch_width = self._rand_int(2, width - 4)
patch_height = self._rand_int(2, height - 4)
# The -2 offset is to account for walls all around the grid.
patch_top_left = (
self._rand_int(1, width - patch_width - 2),
self._rand_int(1, height - patch_height - 2)
)
if patch_top_left != (0, 0):
# Accept patch.
n_patches -= 1
self.add_ice_patch(patch_width, patch_height, patch_top_left)
# Agent top left.
self.agent_pos = (1, 1)
self.agent_dir = 0
# Place goal bottom right.
self.goal_pos = np.array((width - 2, height - 2))
self.put_obj(Goal(), *self.goal_pos)
self.mission = "Get to the goal square"
def add_ice_patch(self, w, h, p):
for i in range(p[0], p[0] + w):
for j in range(p[1], p[1] + h):
self.put_obj(Ice(), i, j)
@property
def on_ice(self):
cur_tile = self.grid.get(*self.agent_pos)
return cur_tile is not None and cur_tile.type == "ice"
def step(self, action):
if not self.on_ice or action != self.actions.forward:
return super().step(action)
# Go forward until not on ice.
while self.on_ice:
fwd_cell = self.grid.get(*self.front_pos)
            if fwd_cell is None or fwd_cell.can_overlap():
self.agent_pos = self.front_pos
else:
break
done = self.step_count >= self.max_steps
obs = self.gen_obs()
return obs, 0, done, {}
class IceGridS10Env(IceGridEnv):
def __init__(self):
super().__init__(size=10)
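# --- Hedged usage sketch (not part of the original module) ---
# Instantiates the 10x10 ice environment directly and takes a few random
# steps; guarded so importing this module for gym registration below is
# unaffected, and it assumes gym_minigrid is installed.
if __name__ == '__main__':
    env = IceGridS10Env()
    env.reset()
    for _ in range(5):
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            break
    print('agent position after random steps:', env.agent_pos)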
register(
id='MiniGrid-IceGridS10-v0',
entry_point='ice:IceGridS10Env'
) |
the-stack_0_11833 | import os
import subprocess
import sys
from typing import Optional
from briefcase.config import BaseConfig
from briefcase.exceptions import BriefcaseCommandError
from .base import BaseCommand
from .create import DependencyInstallError, write_dist_info
class DevCommand(BaseCommand):
cmd_line = 'briefcase dev'
command = 'dev'
output_format = None
description = 'Run a briefcase project in the dev environment'
@property
def platform(self):
"""The dev command always reports as the local platform."""
return {
'darwin': 'macOS',
'linux': 'linux',
'win32': 'windows',
}[sys.platform]
def bundle_path(self, app):
"A placeholder; Dev command doesn't have a bundle path"
raise NotImplementedError()
def binary_path(self, app):
"A placeholder; Dev command doesn't have a binary path"
raise NotImplementedError()
def distribution_path(self, app):
"A placeholder; Dev command doesn't have a distribution path"
raise NotImplementedError()
def add_options(self, parser):
parser.add_argument(
'-a',
'--app',
dest='appname',
help='The app to run'
)
parser.add_argument(
'-d',
'--update-dependencies',
action="store_true",
help='Update dependencies for app'
)
parser.add_argument(
'--no-run',
dest="run_app",
action="store_false",
default=True,
help='Do not run the app, just install dependencies.'
)
def install_dev_dependencies(self, app: BaseConfig, **options):
"""
        Install the dependencies for the app in the dev environment.
:param app: The config object for the app
"""
if app.requires:
try:
self.subprocess.run(
[
sys.executable, "-m",
"pip", "install",
"--upgrade",
] + app.requires,
check=True,
)
except subprocess.CalledProcessError:
raise DependencyInstallError()
else:
print("No application dependencies.")
def run_dev_app(self, app: BaseConfig, env: dict, **options):
"""
Run the app in the dev environment.
:param app: The config object for the app
:param env: environment dictionary for sub command
"""
try:
# Invoke the app.
self.subprocess.run(
[sys.executable, "-m", app.module_name],
env=env,
check=True,
)
except subprocess.CalledProcessError:
print()
raise BriefcaseCommandError(
"Unable to start application '{app.app_name}'".format(
app=app
))
def get_environment(self, app):
# Create a shell environment where PYTHONPATH points to the source
# directories described by the app config.
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(app.PYTHONPATH)
return env
def __call__(
self,
appname: Optional[str] = None,
update_dependencies: Optional[bool] = False,
run_app: Optional[bool] = True,
**options
):
# Confirm all required tools are available
self.verify_tools()
# Which app should we run? If there's only one defined
# in pyproject.toml, then we can use it as a default;
# otherwise look for a -a/--app option.
if len(self.apps) == 1:
app = list(self.apps.values())[0]
elif appname:
try:
app = self.apps[appname]
except KeyError:
raise BriefcaseCommandError(
"Project doesn't define an application named '{appname}'".format(
appname=appname
))
else:
raise BriefcaseCommandError(
"Project specifies more than one application; "
"use --app to specify which one to start."
)
# Look for the existence of a dist-info file.
# If one exists, assume that the dependencies have already been
# installed. If a dependency update has been manually requested,
# do it regardless.
dist_info_path = self.app_module_path(app).parent / '{app.module_name}.dist-info'.format(app=app)
if not run_app:
# If we are not running the app, it means we should update dependencies.
update_dependencies = True
if update_dependencies or not dist_info_path.exists():
print()
print('[{app.app_name}] Installing dependencies...'.format(
app=app
))
self.install_dev_dependencies(app, **options)
write_dist_info(app, dist_info_path)
if run_app:
print()
print('[{app.app_name}] Starting in dev mode...'.format(
app=app
))
env = self.get_environment(app)
state = self.run_dev_app(app, env, **options)
return state
|
the-stack_0_11835 | import numpy as np
import argparse
import nibabel as nib
parser = argparse.ArgumentParser(description='Convert AFNI to RAS')
reqoptions = parser.add_argument_group('Required arguments')
reqoptions.add_argument('-i', '-in', dest="infile", required=True, help='Dir' )
reqoptions.add_argument('-o', '-out', dest="outfile", required=True, help='Dir' )
args = parser.parse_args()
inFile = args.infile #'/mnt/hgfs/ssd_tmp/ASL/056/'
outFile = args.outfile #'/mnt/hgfs/ssd_tmp/ASL/056/'
afni_vec = np.loadtxt(inFile, skiprows=1)
ras_vec = np.zeros((4,4))
ras_vec[0,0] = afni_vec[0]
ras_vec[0,1] = afni_vec[1]
ras_vec[0,2] = -afni_vec[2]
ras_vec[0,3] = -afni_vec[3]
ras_vec[1,0] = afni_vec[4]
ras_vec[1,1] = afni_vec[5]
ras_vec[1,2] = -afni_vec[6]
ras_vec[1,3] = -afni_vec[7]
ras_vec[2,0] = -afni_vec[8]
ras_vec[2,1] = -afni_vec[9]
ras_vec[2,2] = afni_vec[10]
ras_vec[2,3] = afni_vec[11]
ras_vec[3,0] = 0
ras_vec[3,1] = 0
ras_vec[3,2] = 0
ras_vec[3,3] = 1
np.savetxt(outFile, ras_vec, fmt='%0.10f')
|
the-stack_0_11838 | # Copyright (c) Open-MMLab. All rights reserved.
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
from ..runner import load_checkpoint
from .weight_init import constant_init, kaiming_init
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
assert not with_cp
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False):
"""Bottleneck block.
If style is "pytorch", the stride-two layer is the 3x3 conv layer,
if it is "caffe", the stride-two layer is the first 1x1 conv layer.
"""
super(Bottleneck, self).__init__()
assert style in ['pytorch', 'caffe']
if style == 'pytorch':
conv1_stride = 1
conv2_stride = stride
else:
conv1_stride = stride
conv2_stride = 1
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=conv2_stride,
padding=dilation,
dilation=dilation,
bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
def forward(self, x):
def _inner_forward(x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
def make_res_layer(block,
inplanes,
planes,
blocks,
stride=1,
dilation=1,
style='pytorch',
with_cp=False):
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
style=style,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp))
return nn.Sequential(*layers)
class ResNet(nn.Module):
"""ResNet backbone.
Args:
depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
num_stages (int): Resnet stages, normally 4.
strides (Sequence[int]): Strides of the first block of each stage.
dilations (Sequence[int]): Dilation of each stage.
out_indices (Sequence[int]): Output from which stages.
style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
layer is the 3x3 conv layer, otherwise the stride-two layer is
the first 1x1 conv layer.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters.
bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
running stats (mean and var).
bn_frozen (bool): Whether to freeze weight and bias of BN layers.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
"""
arch_settings = {
18: (BasicBlock, (2, 2, 2, 2)),
34: (BasicBlock, (3, 4, 6, 3)),
50: (Bottleneck, (3, 4, 6, 3)),
101: (Bottleneck, (3, 4, 23, 3)),
152: (Bottleneck, (3, 8, 36, 3))
}
def __init__(self,
depth,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
frozen_stages=-1,
bn_eval=True,
bn_frozen=False,
with_cp=False):
super(ResNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
assert num_stages >= 1 and num_stages <= 4
block, stage_blocks = self.arch_settings[depth]
stage_blocks = stage_blocks[:num_stages]
assert len(strides) == len(dilations) == num_stages
assert max(out_indices) < num_stages
self.out_indices = out_indices
self.style = style
self.frozen_stages = frozen_stages
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.with_cp = with_cp
self.inplanes = 64
self.conv1 = nn.Conv2d(
3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_layers = []
for i, num_blocks in enumerate(stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = 64 * 2**i
res_layer = make_res_layer(
block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
with_cp=with_cp)
self.inplanes = planes * block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if i in self.out_indices:
outs.append(x)
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
if mode and self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
self.bn1.eval()
self.bn1.weight.requires_grad = False
self.bn1.bias.requires_grad = False
for i in range(1, self.frozen_stages + 1):
mod = getattr(self, 'layer{}'.format(i))
mod.eval()
for param in mod.parameters():
param.requires_grad = False
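# --- Hedged usage sketch (not part of the original module) ---
# Because of the relative import above this module is meant to be used inside
# the package, so the example lives in a helper instead of a __main__ guard;
# the input size and stage settings below are illustrative assumptions.
def _demo_resnet_backbone():
    import torch
    model = ResNet(depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1)
    model.init_weights(pretrained=None)
    model.train()
    feats = model(torch.randn(2, 3, 224, 224))
    return [tuple(f.shape) for f in feats]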
|
the-stack_0_11839 | import sys
import torch
import os
import shutil
from torch.utils.data.dataloader import DataLoader
import random
sys.path.append('.')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def create_exp_dir(path, scripts_to_save=None):
os.makedirs(path, exist_ok=True)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
script_path = os.path.join(path, 'scripts')
if os.path.exists(script_path):
shutil.rmtree(script_path)
os.mkdir(script_path)
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
print(dst_file)
shutil.copytree(script, dst_file)
class ForeverDataIterator:
"""A data iterator that will never stop producing data"""
def __init__(self, data_loader: DataLoader):
self.data_loader = data_loader
self.iter = iter(self.data_loader)
def __next__(self):
try:
data = next(self.iter)
except StopIteration:
self.iter = iter(self.data_loader)
data = next(self.iter)
return data
def __len__(self):
return len(self.data_loader)
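# --- Hedged usage sketch (not part of the original module) ---
# Exercises AverageMeter, accuracy and ForeverDataIterator on toy tensors;
# the dataset size, batch size and number of draws are assumptions.
if __name__ == '__main__':
    from torch.utils.data import TensorDataset
    dataset = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    forever = ForeverDataIterator(loader)
    meter = AverageMeter('batch_mean', ':.3f')
    for _ in range(10):  # more draws than len(loader); the iterator restarts itself
        x, _ = next(forever)
        meter.update(x.mean().item(), n=x.size(0))
    print(meter)
    logits = torch.randn(8, 5)
    targets = torch.randint(0, 5, (8,))
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    print('top1 %.2f%%, top5 %.2f%%' % (top1.item(), top5.item()))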
|
the-stack_0_11843 | # Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25]) and makes a new list of only the first and last elements of the given list.
# For practice, write this code inside a function.
def first_and_last_element_of_a_list(number_list):
if(len(number_list) <= 1):
return number_list
return [number_list[0], number_list[len(number_list) - 1]]
def run():
a = [5, 10, 15, 20, 25]
b = [2]
c = []
d = [2, 3]
print(first_and_last_element_of_a_list(a))
if __name__ == '__main__':
run()
|
the-stack_0_11844 | import copy
import datetime
from operator import attrgetter
from django.core.exceptions import ValidationError
from django.db import models, router
from django.db.models.sql import InsertQuery
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils.timezone import get_fixed_timezone
from .models import (
Article, Department, Event, Model1, Model2, Model3, NonAutoPK, Party,
Worker,
)
class ModelTests(TestCase):
def test_model_init_too_many_args(self):
msg = "Number of args exceeds number of fields"
with self.assertRaisesMessage(IndexError, msg):
Worker(1, 2, 3, 4)
# The bug is that the following queries would raise:
# "TypeError: Related Field has invalid lookup: gte"
def test_related_gte_lookup(self):
"""
Regression test for #10153: foreign key __gte lookups.
"""
Worker.objects.filter(department__gte=0)
def test_related_lte_lookup(self):
"""
Regression test for #10153: foreign key __lte lookups.
"""
Worker.objects.filter(department__lte=0)
def test_sql_insert_compiler_return_id_attribute(self):
"""
Regression test for #14019: SQLInsertCompiler.as_sql() failure
"""
db = router.db_for_write(Party)
query = InsertQuery(Party)
query.insert_values([Party._meta.fields[0]], [], raw=False)
# this line will raise an AttributeError without the accompanying fix
query.get_compiler(using=db).as_sql()
def test_empty_choice(self):
# NOTE: Part of the regression test here is merely parsing the model
# declaration. The verbose_name, in particular, did not always work.
a = Article.objects.create(
headline="Look at me!", pub_date=datetime.datetime.now()
)
# An empty choice field should return None for the display name.
self.assertIs(a.get_status_display(), None)
# Empty strings should be returned as string
a = Article.objects.get(pk=a.pk)
self.assertEqual(a.misc_data, '')
def test_long_textfield(self):
# TextFields can hold more than 4000 characters (this was broken in
# Oracle).
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text="ABCDE" * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 5000)
def test_long_unicode_textfield(self):
# TextFields can hold more than 4000 bytes also when they are
# less than 4000 characters
a = Article.objects.create(
headline="Really, really big",
pub_date=datetime.datetime.now(),
article_text='\u05d0\u05d1\u05d2' * 1000
)
a = Article.objects.get(pk=a.pk)
self.assertEqual(len(a.article_text), 3000)
def test_date_lookup(self):
# Regression test for #659
Party.objects.create(when=datetime.datetime(1999, 12, 31))
Party.objects.create(when=datetime.datetime(1998, 12, 31))
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create(when=datetime.datetime(1, 3, 3))
self.assertQuerysetEqual(
Party.objects.filter(when__month=2), []
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=1), [
datetime.date(1999, 1, 1)
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__month=12), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year=1998), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #8510
self.assertQuerysetEqual(
Party.objects.filter(when__day="31"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__month="12"), [
datetime.date(1999, 12, 31),
datetime.date(1998, 12, 31),
],
attrgetter("when"),
ordered=False
)
self.assertQuerysetEqual(
Party.objects.filter(when__year="1998"), [
datetime.date(1998, 12, 31),
],
attrgetter("when")
)
# Regression test for #18969
self.assertQuerysetEqual(
Party.objects.filter(when__year=1), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
self.assertQuerysetEqual(
Party.objects.filter(when__year='1'), [
datetime.date(1, 3, 3),
],
attrgetter("when")
)
def test_date_filter_null(self):
# Date filtering was failing with NULL date values in SQLite
# (regression test for #3501, among other things).
Party.objects.create(when=datetime.datetime(1999, 1, 1))
Party.objects.create()
p = Party.objects.filter(when__month=1)[0]
self.assertEqual(p.when, datetime.date(1999, 1, 1))
self.assertQuerysetEqual(
Party.objects.filter(pk=p.pk).dates("when", "month"), [
1
],
attrgetter("month")
)
def test_get_next_prev_by_field(self):
# get_next_by_FIELD() and get_previous_by_FIELD() don't crash when
# microseconds values are stored in the database.
Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))
self.assertEqual(
e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
)
self.assertEqual(
e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
)
def test_get_next_prev_by_field_unsaved(self):
msg = 'get_next/get_previous cannot be used on unsaved objects.'
with self.assertRaisesMessage(ValueError, msg):
Event().get_next_by_when()
with self.assertRaisesMessage(ValueError, msg):
Event().get_previous_by_when()
def test_primary_key_foreign_key_types(self):
# Check Department and Worker (non-default PK type)
d = Department.objects.create(id=10, name="IT")
w = Worker.objects.create(department=d, name="Full-time")
self.assertEqual(str(w), "Full-time")
@skipUnlessDBFeature("supports_timezones")
def test_timezones(self):
# Saving and updating with timezone-aware datetime Python objects.
# Regression test for #10443.
# The idea is that all these creations and saving should work without
# crashing. It's not rocket science.
dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=get_fixed_timezone(600))
dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=get_fixed_timezone(600))
obj = Article.objects.create(
headline="A headline", pub_date=dt1, article_text="foo"
)
obj.pub_date = dt2
obj.save()
self.assertEqual(
Article.objects.filter(headline="A headline").update(pub_date=dt1),
1
)
def test_chained_fks(self):
"""
Regression for #18432: Chained foreign keys with to_field produce incorrect query
"""
m1 = Model1.objects.create(pkey=1000)
m2 = Model2.objects.create(model1=m1)
m3 = Model3.objects.create(model2=m2)
# this is the actual test for #18432
m3 = Model3.objects.get(model2=1000)
m3.model2
@isolate_apps('model_regress')
def test_metaclass_can_access_attribute_dict(self):
"""
Model metaclasses have access to the class attribute dict in
__init__() (#30254).
"""
class HorseBase(models.base.ModelBase):
def __init__(cls, name, bases, attrs):
super().__init__(name, bases, attrs)
cls.horns = (1 if 'magic' in attrs else 0)
class Horse(models.Model, metaclass=HorseBase):
name = models.CharField(max_length=255)
magic = True
self.assertEqual(Horse.horns, 1)
class ModelValidationTest(TestCase):
def test_pk_validation(self):
NonAutoPK.objects.create(name="one")
again = NonAutoPK(name="one")
with self.assertRaises(ValidationError):
again.validate_unique()
class EvaluateMethodTest(TestCase):
"""
Regression test for #13640: cannot filter by objects with 'evaluate' attr
"""
def test_model_with_evaluate_method(self):
"""
You can filter by objects that have an 'evaluate' attr
"""
dept = Department.objects.create(pk=1, name='abc')
dept.evaluate = 'abc'
Worker.objects.filter(department=dept)
class ModelFieldsCacheTest(TestCase):
def test_fields_cache_reset_on_copy(self):
department1 = Department.objects.create(id=1, name='department1')
department2 = Department.objects.create(id=2, name='department2')
worker1 = Worker.objects.create(name='worker', department=department1)
worker2 = copy.copy(worker1)
self.assertEqual(worker2.department, department1)
# Changing related fields doesn't mutate the base object.
worker2.department = department2
self.assertEqual(worker2.department, department2)
self.assertEqual(worker1.department, department1)
|
the-stack_0_11847 | from __future__ import annotations
import inspect
from pathlib import Path
import pytest
from _pytest.monkeypatch import MonkeyPatch
import platformdirs
from platformdirs.android import Android
def test_package_metadata() -> None:
assert hasattr(platformdirs, "__version__")
assert hasattr(platformdirs, "__version_info__")
def test_method_result_is_str(func: str) -> None:
method = getattr(platformdirs, func)
result = method()
assert isinstance(result, str)
def test_property_result_is_str(func: str) -> None:
dirs = platformdirs.PlatformDirs("MyApp", "MyCompany", version="1.0")
result = getattr(dirs, func)
assert isinstance(result, str)
def test_method_result_is_path(func_path: str) -> None:
method = getattr(platformdirs, func_path)
result = method()
assert isinstance(result, Path)
def test_property_result_is_path(func_path: str) -> None:
dirs = platformdirs.PlatformDirs("MyApp", "MyCompany", version="1.0")
result = getattr(dirs, func_path)
assert isinstance(result, Path)
def test_function_interface_is_in_sync(func: str) -> None:
function_dir = getattr(platformdirs, func)
function_path = getattr(platformdirs, func.replace("_dir", "_path"))
assert inspect.isfunction(function_dir)
assert inspect.isfunction(function_path)
function_dir_signature = inspect.Signature.from_callable(function_dir)
function_path_signature = inspect.Signature.from_callable(function_path)
assert function_dir_signature.parameters == function_path_signature.parameters
@pytest.mark.parametrize("root", ["A", "/system", None])
@pytest.mark.parametrize("data", ["D", "/data", None])
def test_android_active(monkeypatch: MonkeyPatch, root: str | None, data: str | None) -> None:
for env_var, value in {"ANDROID_DATA": data, "ANDROID_ROOT": root}.items():
if value is None:
monkeypatch.delenv(env_var, raising=False)
else:
monkeypatch.setenv(env_var, value)
expected = root == "/system" and data == "/data"
if expected:
assert platformdirs._set_platform_dir_class() is Android
else:
assert platformdirs._set_platform_dir_class() is not Android
|
the-stack_0_11852 | #!/usr/bin/python3
try:
import os, sys, requests
import argparse, json
import datetime as dt
import configparser
from elasticsearch import Elasticsearch
from github import Github
from string import ascii_letters
print("All libraries/modules loaded as expected !!!!! ")
except Exception as err:
print("Missing Modules =====> %s" %err)
print("Kindly installed using pip3 install <pip-package-name>")
sys.exit(1)
parser=argparse.ArgumentParser(prog='Github_commit_indexer',
    epilog=''' NOTE: This script pulls commits from a public GitHub repo,
                then indexes each commit before storing it in an ElasticSearch cloud deployment
''')
parser.add_argument('--GithubUser', nargs='?', default='RockstarLang', help= 'Github user account')
parser.add_argument('--repo', nargs='?', default='rockstar', help= 'Github repo')
if len(sys.argv)==1:
parser.print_help(sys.stderr)
#sys.exit(1)
args=parser.parse_args()
def to_stderr(msg):
print(msg, file=sys.stderr, flush=True)
def error(msg):
to_stderr('ERROR: ' + msg)
sys.exit(1)
def datetime_formater(unformated_datetime):
'''
    This function converts a datetime string to a human-readable format
'''
date_time = unformated_datetime.split("T")
date = date_time[0].split("-")
time = date_time[1].rstrip(ascii_letters).split(":")
formated_datetime = dt.datetime(int(date[0]), int(date[1]), int(date[2]), int(time[0]), int(time[1]), int(time[2]))
return formated_datetime.strftime("%d-%b-%Y %H:%M:%S")
def Elastic_Search(elk_object, commit_document,indx):
try:
'''
Ingesting commit history document to ElasticServer deployment
'''
#elk_object.indices.create(index = indx, ignore=400)
ingest_status = elk_object.index(index=indx, body=commit_document)
if ingest_status["result"] != "created" and int(ingest_status["_shards"]["failed"]) == 1:
print(json.dumps(commit_document, indent = 2))
error("Ingesting to ElasticServer deployment failed for last committed indexed document \n ")
elk_object.indices.refresh(index = indx)
except Exception as err:
error(str(err))
def commit_info(api_commit_url, ElasticSearch):
'''
    This function pulls out the info needed to be ingested as index documents for the ElasticSearch cloud deployment
'''
global document
global count
try:
commit_data = requests.get(api_commit_url).json()
document.setdefault("Date", datetime_formater(commit_data["commit"]["author"]["date"]))
document.setdefault("Username", commit_data["author"]["login"])
document.setdefault("Message", commit_data["commit"]["message"].replace("\n\n", " "))
Elastic_Search(ElasticSearch, document, document["Username"])
print(json.dumps(document, indent = 2))
print("indexed document ingested into clould deployment successfully !!!!!")
print("\n\n")
document = {}
except Exception as err:
print("\n\n")
error(str("%s: %s" %(err,commit_data["message"])))
if __name__ == '__main__':
try:
document = {}
'''
Parse login credential for Github and ElasticSearch
'''
login_config_parse = configparser.ConfigParser()
login_config_parse.read('login_credential.ini')
# verify that Elastic login_credential.ini file exist
if not os.path.isfile("login_credential.ini"):
print('\n\n### Kindly create a basic authentication file named "login_credential.ini"')
print("[ELASTIC]")
print('cloud_id = "DEPLOYMENT_NAME:CLOUD_ID_DETAILS" ')
print('user = Username' )
print('Password = Password \n\n\n')
print('[GITHUB]')
print('login_or_token = Github Person Access Token')
sys.exit(1)
'''
Connect to Github repo
kindly note that unauthenticated API calls are rate limited to 60 requests/hour
'''
GH = Github(login_or_token=login_config_parse['GITHUB']['login_or_token'])
github_object = GH.get_user(args.GithubUser)
GH_repo = github_object.get_repo(args.repo)
'''
Connect to elastic search cloud deployment using cloud_id & http_auth method
'''
ES = Elasticsearch(
cloud_id = login_config_parse['ELASTIC']['cloud_id'],
http_auth = (login_config_parse['ELASTIC']['user'], login_config_parse['ELASTIC']['password'])
)
#print(json.dumps(ES.info(), indent = 2))
'''
        Verify successful communication with the ElasticSearch deployment
'''
if ES.ping() is not True:
print("Kindly verify your deployment status/login credential, refers to the below official ElasticSearch documentation on basic authentication")
print("https://www.elastic.co/guide/en/cloud/current/ec-getting-started-python.html")
'''
        Note: won't scale nicely because GitHub API rate-limiting allows only a limited number of requests/hour
'''
commit = GH_repo.get_commits()
count = 0
'''
        This loops over the paginated list of commit SHAs, then parses each commit SHA into the repo's commit API url
'''
for commit_hash in commit:
commit_sha = str(commit_hash).split('"')[1]
commit_url = GH_repo.commits_url.split("{/sha}")[0]+"/{}".format(commit_sha)
commit_info(commit_url, ES)
count+=1
print("Process now completed!!!!!!")
except Exception as err:
error(str(err))
|
the-stack_0_11853 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable
class VGG_enc(nn.Module):
def __init__(self, input_channels=6):
super(VGG_enc, self).__init__()
in_channels = input_channels
self.c11 = nn.Conv2d(in_channels, 64, kernel_size=3, padding=1)
self.bn11 = nn.BatchNorm2d(64)
self.c12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(64)
self.p1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c21 = nn.Conv2d(64, 128, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(128)
self.c22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(128)
self.p2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c31 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(256)
self.c32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(256)
self.c33 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(256)
self.p3 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c41 = nn.Conv2d(256, 512, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(512)
self.c42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(512)
self.c43 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(512)
self.p4 = nn.MaxPool2d(kernel_size=2, stride=2)
self.c51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn51 = nn.BatchNorm2d(512)
self.c52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn52 = nn.BatchNorm2d(512)
self.c53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn53 = nn.BatchNorm2d(512)
def forward(self, x):
o11 = F.relu(self.bn11(self.c11(x)), inplace=True)
o12 = F.relu(self.bn12(self.c12(o11)), inplace=True)
o1p = self.p1(o12)
o21 = F.relu(self.bn21(self.c21(o1p)), inplace=True)
o22 = F.relu(self.bn22(self.c22(o21)), inplace=True)
o2p = self.p2(o22)
o31 = F.relu(self.bn31(self.c31(o2p)), inplace=True)
o32 = F.relu(self.bn32(self.c32(o31)), inplace=True)
o33 = F.relu(self.bn33(self.c33(o32)), inplace=True)
o3p = self.p3(o33)
o41 = F.relu(self.bn41(self.c41(o3p)), inplace=True)
o42 = F.relu(self.bn42(self.c42(o41)), inplace=True)
o43 = F.relu(self.bn43(self.c43(o42)), inplace=True)
o4p = self.p4(o43)
o51 = F.relu(self.bn51(self.c51(o4p)), inplace=True)
o52 = F.relu(self.bn52(self.c52(o51)), inplace=True)
o53 = F.relu(self.bn53(self.c53(o52)), inplace=True)
return o53, o43, o33
class VGG_dec(nn.Module):
def __init__(self):
super(VGG_dec, self).__init__()
out_channels = 6
self.c53 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn53 = nn.BatchNorm2d(512)
self.c52 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn52 = nn.BatchNorm2d(512)
self.c51 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn51 = nn.BatchNorm2d(512)
self.u5 = nn.Upsample(scale_factor=2, mode='nearest')
self.c43 = nn.Conv2d(1024, 512, kernel_size=3, padding=1)
self.bn43 = nn.BatchNorm2d(512)
self.c42 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.bn42 = nn.BatchNorm2d(512)
self.c41 = nn.Conv2d(512, 256, kernel_size=3, padding=1)
self.bn41 = nn.BatchNorm2d(256)
self.u4 = nn.Upsample(scale_factor=2, mode='nearest')
self.c33 = nn.Conv2d(512, 256, kernel_size=3, padding=1)
self.bn33 = nn.BatchNorm2d(256)
self.c32 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.bn32 = nn.BatchNorm2d(256)
self.c31 = nn.Conv2d(256, 128, kernel_size=3, padding=1)
self.bn31 = nn.BatchNorm2d(128)
self.u3 = nn.Upsample(scale_factor=2, mode='nearest')
self.c22 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
self.bn22 = nn.BatchNorm2d(128)
self.c21 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.bn21 = nn.BatchNorm2d(64)
self.u2 = nn.Upsample(scale_factor=2, mode='nearest')
self.c12 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
self.bn12 = nn.BatchNorm2d(64)
#self.c11 = nn.Conv2d(64, 64, kernel_size=3, padding=1)
#self.bn11 = nn.BatchNorm2d(64)
def forward(self, i53, i43, i33):
o53 = F.relu(self.bn53(self.c53(i53)), inplace=True)
o52 = F.relu(self.bn52(self.c52(o53)), inplace=True)
o51 = F.relu(self.bn51(self.c51(o52)), inplace=True)
o5u = self.u5(o51)
o5c = torch.cat((o5u, i43), 1)
o43 = F.relu(self.bn43(self.c43(o5c)), inplace=True)
o42 = F.relu(self.bn42(self.c42(o43)), inplace=True)
o41 = F.relu(self.bn41(self.c41(o42)), inplace=True)
o4u = self.u4(o41)
o4c = torch.cat((o4u, i33), 1)
o33 = F.relu(self.bn33(self.c33(o4c)), inplace=True)
o32 = F.relu(self.bn32(self.c32(o33)), inplace=True)
o31 = F.relu(self.bn31(self.c31(o32)), inplace=True)
o3u = self.u3(o31)
o22 = F.relu(self.bn22(self.c22(o3u)), inplace=True)
o21 = F.relu(self.bn21(self.c21(o22)), inplace=True)
o2u = self.u2(o21)
o12 = F.relu(self.bn12(self.c12(o2u)), inplace=True)
#o11 = F.relu(self.bn11(self.c11(o12)), inplace=True)
return o12
class VGG_net(nn.Module):
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512]
def __init__(self, input_channels):
super(VGG_net, self).__init__()
self.enc_net = VGG_enc(input_channels)
self.dec_net = VGG_dec()
self.conv_warp = nn.Conv2d(self.cfg[0], 2, kernel_size=3, padding=1)
self.conv_mask = nn.Conv2d(self.cfg[0], 1, kernel_size=3, padding=1)
self.conv_comp = nn.Conv2d(self.cfg[0], 3, kernel_size=3, padding=1)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
# input: Nx3x3x256x320
def forward(self, x):
dec_feat = self.dec_net(*self.enc_net(x))
flow = self.conv_warp(dec_feat)
mask = self.conv_mask(dec_feat)
comp = self.conv_comp(dec_feat)
return flow, mask, comp
def VGG_Warper(input_channels = 6):
return VGG_net(input_channels)
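# --- Hedged usage sketch (not part of the original module) ---
# Runs the warper on a dummy 6-channel input (e.g. two stacked RGB frames);
# the batch size and 256x320 spatial size are illustrative assumptions.
if __name__ == '__main__':
    net = VGG_Warper(input_channels=6)
    dummy = torch.randn(1, 6, 256, 320)
    flow, mask, comp = net(dummy)
    print(flow.shape, mask.shape, comp.shape)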
|
the-stack_0_11855 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
import numpy as np
from tensorflow.contrib.keras.python import keras
from tensorflow.python.platform import test
from tensorflow.python.training import training as training_module
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class TestModelSaving(test.TestCase):
def test_sequential_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
new_model = keras.models.load_model(fname)
os.remove(fname)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
# test that new updates are the same with both models
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
new_model.train_on_batch(x, y)
out = model.predict(x)
out2 = new_model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_sequential_model_saving_2(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
# test with custom optimizer, loss
class CustomOp(keras.optimizers.RMSprop):
pass
def custom_loss(y_true, y_pred):
return keras.losses.mse(y_true, y_pred)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(
fname,
custom_objects={'CustomOp': CustomOp,
'custom_loss': custom_loss})
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_functional_model_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
inputs = keras.layers.Input(shape=(3,))
x = keras.layers.Dense(2)(inputs)
output = keras.layers.Dense(3)(x)
model = keras.models.Model(inputs, output)
model.compile(loss=keras.losses.MSE,
optimizer=keras.optimizers.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
out2 = model.predict(x)
self.assertAllClose(out, out2, atol=1e-05)
def test_saving_without_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_with_tf_optimizer(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse',
optimizer=training_module.AdadeltaOptimizer(0.1),
metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_right_after_compilation(self):
if h5py is None:
return # Skip test if models cannot be saved.
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
model.model._make_train_function()
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
def test_saving_lambda_numpy_array_arguments(self):
if h5py is None:
return # Skip test if models cannot be saved.
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = keras.layers.Input(shape=(4, 2, 3))
output = keras.layers.Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = keras.models.Model(inputs, output)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
keras.models.save_model(model, fname)
model = keras.models.load_model(fname)
os.remove(fname)
self.assertAllClose(mean, model.layers[1].arguments['mu'])
self.assertAllClose(std, model.layers[1].arguments['std'])
class TestSequential(test.TestCase):
"""Most Sequential model API tests are covered in `training_test.py`.
"""
def test_basic_methods(self):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
model.add(keras.layers.Dropout(0.3, name='dp'))
model.add(keras.layers.Dense(2, kernel_regularizer='l2',
kernel_constraint='max_norm'))
model.build()
self.assertEqual(model.state_updates, model.model.state_updates)
self.assertEqual(model.get_layer(name='dp').name, 'dp')
def test_sequential_pop(self):
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.compile(loss='mse', optimizer='sgd')
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
model.pop()
self.assertEqual(len(model.layers), 1)
self.assertEqual(model.output_shape, (None, num_hidden))
model.compile(loss='mse', optimizer='sgd')
y = np.random.random((batch_size, num_hidden))
model.fit(x, y, epochs=1)
# Test popping single-layer model
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.pop()
self.assertEqual(len(model.layers), 0)
self.assertEqual(len(model.outputs), 0)
# Invalid use case
model = keras.models.Sequential()
with self.assertRaises(TypeError):
model.pop()
def test_sequential_weight_loading(self):
if h5py is None:
return
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir)
h5_path = os.path.join(temp_dir, 'test.h5')
num_hidden = 5
input_dim = 3
batch_size = 5
num_classes = 2
with self.test_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
x = np.random.random((batch_size, input_dim))
ref_y = model.predict(x)
model.save_weights(h5_path)
model = keras.models.Sequential()
model.add(keras.layers.Dense(num_hidden, input_dim=input_dim))
model.add(keras.layers.Dense(num_classes))
model.load_weights(h5_path)
y = model.predict(x)
self.assertAllClose(y, ref_y)
def test_invalid_use_cases(self):
with self.test_session():
# Added objects must be layer instances
with self.assertRaises(TypeError):
model = keras.models.Sequential()
model.add(None)
# Added layers must have an inputs shape
with self.assertRaises(ValueError):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1))
# Added layers cannot have multiple outputs
class MyLayer(keras.layers.Layer):
def call(self, inputs):
return [3 * inputs, 2 * inputs]
def _compute_output_shape(self, input_shape):
return [input_shape, input_shape]
with self.assertRaises(ValueError):
model = keras.models.Sequential()
model.add(MyLayer(input_shape=(3,)))
with self.assertRaises(TypeError):
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=1))
model.add(MyLayer())
# Building empty model
model = keras.models.Sequential()
with self.assertRaises(TypeError):
model.build()
class TestModelCloning(test.TestCase):
def test_clone_sequential_model(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
# Everything should work in a new session.
keras.backend.clear_session()
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new tensor
input_a = keras.Input(shape=(4,))
new_model = keras.models.clone_model(
model, input_tensors=input_a)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new, non-Keras tensor
input_a = keras.backend.variable(val_a)
new_model = keras.models.clone_model(
model, input_tensors=input_a)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_clone_functional_model(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
# Everything should work in a new session.
keras.backend.clear_session()
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
input_a = keras.Input(shape=(4,), name='a')
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new, non-Keras tensors
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_model_cloning_invalid_use_cases(self):
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
fn_model = keras.models.Model(x, y)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(seq_model)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(None)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(fn_model)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=y)
if __name__ == '__main__':
test.main()
|
the-stack_0_11857 | import boto3
import copy
import hashlib
import logging
import json
import time
import typing
import uuid
from bert import \
encoders as bert_encoders, \
datasource as bert_datasource, \
constants as bert_constants
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
PWN = typing.TypeVar('PWN')
DELAY: int = 15
class QueueItem:
__slots__ = ('_payload', '_identity')
_payload: typing.Dict[str, typing.Any]
_identity: str
def __init__(self: PWN, payload: typing.Dict[str, typing.Any], identity: str = None) -> None:
self._payload = payload
self._identity = identity
def calc_identity(self: PWN) -> str:
if self._identity:
return self._identity
combined: str = ''.join(bert_encoders.encode_identity_object(self._payload))
combined: str = f'{combined}-{uuid.uuid4()}'
return hashlib.sha256(combined.encode(bert_constants.ENCODING)).hexdigest()
    def keys(self: PWN) -> typing.Any:
        return self._payload.keys()
def get(self: PWN, name: str, default: typing.Any = None) -> typing.Any:
return self._payload.get(name, default)
def clone(self: PWN) -> typing.Any:
return self.__class__(copy.deepcopy(self._payload))
def __getitem__(self: PWN, name: str) -> typing.Any:
try:
return self._payload[name]
except KeyError:
raise KeyError(f'key-name[{name}] not found')
def __setitem__(self: PWN, name: str, value: typing.Any) -> None:
self._payload[name] = value
def __delitem__(self: PWN, name: str) -> None:
try:
del self._payload[name]
except KeyError:
raise KeyError(f'key-name[{name}] not found')
class BaseQueue:
_table_name: str
_value: QueueItem
def __init__(self: PWN, table_name: str) -> None:
self._table_name = table_name
self._value = None
def __next__(self) -> typing.Any:
if not self._value is None:
logger.debug('Destroying Value')
self._destroy(self._value)
self._value = None
self._value = self.get()
if self._value is None or self._value == 'STOP':
raise StopIteration
return self._value
def get(self: PWN) -> QueueItem:
raise NotImplementedError
def put(self: PWN, value: typing.Union[typing.Dict[str, typing.Any], QueueItem]) -> None:
raise NotImplementedError
def __iter__(self) -> PWN:
return self
def _destroy(self: PWN, queue_item: QueueItem) -> None:
raise NotImplementedError
def size(self: PWN) -> str:
raise NotImplementedError
class DynamodbQueue(BaseQueue):
_dynamodb_client: 'boto3.client("dynamodb")'
def __init__(self: PWN, table_name: str) -> None:
super(DynamodbQueue, self).__init__(table_name)
self._dynamodb_client = boto3.client('dynamodb')
def _destroy(self: PWN, queue_item: QueueItem, confirm_delete: bool = False) -> None:
if confirm_delete:
self._dynamodb_client.delete_item(
TableName=self._table_name,
Key={'identity': {'S': queue_item.calc_identity()}},
                Expected={'identity': {'Exists': True, 'Value': {'S': queue_item.calc_identity()}}})
else:
self._dynamodb_client.delete_item(
TableName=self._table_name,
Key={'identity': {'S': queue_item.calc_identity()}})
def put(self: PWN, value: typing.Union[typing.Dict[str, typing.Any], QueueItem]) -> None:
if isinstance(value, dict):
queue_item = QueueItem(value)
elif isinstance(value, QueueItem):
queue_item = value
else:
raise NotImplementedError
encoded_value = bert_encoders.encode_object({
'identity': queue_item.calc_identity(),
'datum': queue_item.clone(),
})
self._dynamodb_client.put_item(TableName=self._table_name, Item=encoded_value)
def get(self: PWN) -> typing.Dict[str, typing.Any]:
try:
value: typing.Any = self._dynamodb_client.scan(TableName=self._table_name, Select='ALL_ATTRIBUTES', Limit=1)['Items'][0]
except IndexError:
return None
else:
queue_item = QueueItem(bert_encoders.decode_object(value['datum']), value['identity']['S'])
if value['identity']['S'] in ['sns-entry', 'invoke-arg', 'api-gateway', 'cognito']:
return queue_item
            # The order of data when coming out of the database may not be preserved, resulting in a different identity
# assert queue_item.calc_identity() == value['identity']['S'], f'{queue_item.calc_identity()} != {value["identity"]["S"]}'
return queue_item
class RedisQueue(BaseQueue):
_table_name: str
_redis_client: 'redis-client'
def __init__(self, table_name: str) -> None:
super(RedisQueue, self).__init__(table_name)
self._redis_client = bert_datasource.RedisConnection.ParseURL(bert_constants.REDIS_URL).client()
self._redis_client_async = None
def flushdb(self) -> None:
self._redis_client.flushdb()
def _destroy(self: PWN, queue_item: QueueItem) -> None:
pass
def size(self: PWN) -> int:
return int(self._redis_client.llen(self._table_name))
async def _resolve_connection(self: PWN) -> None:
if self._redis_client_async is None:
self._redis_client_async = await bert_datasource.RedisConnection.ParseURL(bert_constants.REDIS_URL).client_async()
return self._redis_client_async
async def size_async(self: PWN) -> int:
await self._resolve_connection()
return int(await self._redis_client_async.execute('llen', self._table_name))
def get(self) -> QueueItem:
try:
value: str = self._redis_client.lpop(self._table_name).decode(bert_constants.ENCODING)
except AttributeError:
return 'STOP'
else:
# if self._cache_backend.has(value):
# return self._cache_backend.obtain(value)
return bert_encoders.decode_object(json.loads(value)['datum'])
async def get_async(self: PWN, prefetch: int = 1) -> typing.List[QueueItem]:
await self._resolve_connection()
list_len = await self._redis_client_async.execute('llen', self._table_name)
batch = await self._redis_client_async.execute('lrange', self._table_name, 0, prefetch - 1)
if batch:
await self._redis_client_async.execute('ltrim', self._table_name, len(batch), list_len)
return [bert_encoders.decode_object(json.loads(value.decode(bert_constants.ENCODING))['datum']) for value in batch]
return []
def put(self: PWN, value: typing.Dict[str, typing.Any]) -> None:
encoded_value = json.dumps(bert_encoders.encode_object({
'identity': 'local-queue',
'datum': value
})).encode(bert_constants.ENCODING)
# self._cache_backend.store(encoded_value)
self._redis_client.rpush(self._table_name, encoded_value)
async def put_async(self: PWN, values: typing.List[typing.Dict[str, typing.Any]]) -> None:
await self._resolve_connection()
encoded_values = [json.dumps(bert_encoders.encode_object({
'identity': 'local-queue',
'datum': value,
})).encode(bert_constants.ENCODING) for value in values]
await self._redis_client_async.execute('rpush', self._table_name, *encoded_values)
class StreamingQueue(DynamodbQueue):
"""
    When functions are deployed to AWS Lambda, auto-invocation is available as an option for running them. With StreamingQueue, we want to push local objects into
    the API already in use, while keeping the available `put` function so that the `done_queue` api will still push contents into the next `work_queue`.
    We'll also augment the local `get` function api so that it only pulls from records local to the stream and never from DynamoDB.
"""
# Share the memory across invocations, within the same process/thread. This allows for
    # comm_binders to be called multiple times and still pull from the same queue
_queue: typing.List[typing.Dict[str, typing.Any]] = []
def local_put(self: PWN, record: typing.Union[typing.Dict[str, typing.Any], QueueItem]) -> None:
if isinstance(record, dict):
queue_item = QueueItem(bert_encoders.decode_object(record['datum']), record['identity']['S'])
elif isinstance(record, QueueItem):
queue_item = record
self._queue.append(queue_item)
def get(self: PWN) -> QueueItem:
try:
value: QueueItem = self._queue.pop(0)
except IndexError:
# return super(StreamingQueue, self).get()
return None
else:
return value
class LocalQueue(DynamodbQueue):
"""
    When testing, it's convenient to use only a LocalQueue
"""
_key: str = None
# Share the memory across invocations, within the same process/thread. This allows for
    # comm_binders to be called multiple times and still pull from the same queue
_queue: typing.List[typing.Dict[str, typing.Any]] = []
def __init__(self: PWN, key: str) -> None:
self._key = key
self._value = None
def local_put(self: PWN, record: typing.Dict[str, typing.Any]) -> None:
self._queue.append(copy.deepcopy(record))
def put(self: PWN, record: typing.Dict[str, typing.Any]) -> None:
logger.info(f'LocalQueue Put[{record}]')
def get(self: PWN) -> typing.Dict[str, typing.Any]:
try:
# may need to unpack because local queues are used for debugging in AWS Lambda
value: typing.Any = self._queue.pop(0)
except IndexError:
return None
else:
return value
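# Minimal usage sketch (illustrative addition, not part of the original module):
# LocalQueue keeps items in process memory, so it can be exercised without AWS
# credentials or a Redis instance. The key name below is a placeholder.
if __name__ == '__main__':
    demo_queue = LocalQueue('bert-demo-queue')
    demo_queue.local_put({'identity': {'S': 'local-queue'}, 'datum': {'value': 42}})
    print(demo_queue.get())  # -> {'identity': {'S': 'local-queue'}, 'datum': {'value': 42}}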
|
the-stack_0_11858 | import numpy as np
from astropy.wcs.wcsapi import BaseHighLevelWCS
from glue.core import BaseData
from glue_jupyter.bqplot.image import BqplotImageView
from jdaviz.core.registries import viewer_registry
__all__ = ['ImvizImageView']
@viewer_registry("imviz-image-viewer", label="Image 2D (Imviz)")
class ImvizImageView(BqplotImageView):
tools = ['bqplot:panzoom', 'bqplot:rectangle', 'bqplot:circle', 'bqplot:matchwcs']
default_class = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.label_mouseover = None
self.add_event_callback(self.on_mouse_or_key_event, events=['mousemove', 'mouseenter',
'mouseleave', 'keydown'])
self.state.show_axes = False
def on_mouse_or_key_event(self, data):
# Find visible layers
visible_layers = [layer for layer in self.state.layers if layer.visible]
if len(visible_layers) == 0:
return
if self.label_mouseover is None:
if 'g-coords-info' in self.session.application._tools:
self.label_mouseover = self.session.application._tools['g-coords-info']
else:
return
if data['event'] == 'mousemove':
# Display the current cursor coordinates (both pixel and world) as
# well as data values. For now we use the first dataset in the
# viewer for the data values.
# Extract first dataset from visible layers and use this for coordinates - the choice
# of dataset shouldn't matter if the datasets are linked correctly
image = visible_layers[0].layer
# Extract data coordinates - these are pixels in the image
x = data['domain']['x']
y = data['domain']['y']
maxsize = int(np.ceil(np.log10(np.max(image.shape)))) + 3
fmt = 'x={0:0' + str(maxsize) + '.1f} y={1:0' + str(maxsize) + '.1f}'
self.label_mouseover.pixel = (fmt.format(x, y))
if isinstance(image.coords, BaseHighLevelWCS):
# Convert these to a SkyCoord via WCS - note that for other datasets
# we aren't actually guaranteed to get a SkyCoord out, just for images
# with valid celestial WCS
try:
celestial_coordinates = (image.coords.pixel_to_world(x, y).icrs
.to_string('hmsdms', precision=4, pad=True))
except Exception:
self.label_mouseover.world = ''
else:
self.label_mouseover.world = f'{celestial_coordinates:32s} (ICRS)'
else:
self.label_mouseover.world = ''
# Extract data values at this position.
# TODO: for now we just use the first visible layer but we should think
# of how to display values when multiple datasets are present.
if x > -0.5 and y > -0.5 and x < image.shape[1] - 0.5 and y < image.shape[0] - 0.5:
attribute = visible_layers[0].attribute
value = image.get_data(attribute)[int(round(y)), int(round(x))]
unit = image.get_component(attribute).units
self.label_mouseover.value = f'{value:+10.5e} {unit}'
else:
self.label_mouseover.value = ''
elif data['event'] == 'mouseleave' or data['event'] == 'mouseenter':
self.label_mouseover.pixel = ""
self.label_mouseover.world = ""
self.label_mouseover.value = ""
if data['event'] == 'keydown' and data['key'] == 'b':
# Simple blinking of images - this will make it so that only one
# layer is visible at a time and cycles through the layers.
if len(self.state.layers) > 1:
# If only one layer is visible, pick the next one to be visible,
# otherwise start from the last visible one.
visible = [ilayer for ilayer, layer in
enumerate(self.state.layers) if layer.visible]
if len(visible) > 0:
next_layer = (visible[-1] + 1) % len(self.state.layers)
self.state.layers[next_layer].visible = True
for ilayer in visible:
if ilayer != next_layer:
self.state.layers[ilayer].visible = False
def set_plot_axes(self):
self.figure.axes[1].tick_format = None
self.figure.axes[0].tick_format = None
self.figure.axes[1].label = "y: pixels"
self.figure.axes[0].label = "x: pixels"
# Make it so y axis label is not covering tick numbers.
self.figure.axes[1].label_offset = "-50"
def data(self, cls=None):
return [layer_state.layer # .get_object(cls=cls or self.default_class)
for layer_state in self.state.layers
if hasattr(layer_state, 'layer') and
isinstance(layer_state.layer, BaseData)]
|
the-stack_0_11861 | import cv2
import numpy as np
from argparse import ArgumentParser
def parse_args():
parser = ArgumentParser()
parser.add_argument('--normal_path', type=str)
parser.add_argument('--depth_path', type=str)
parser.add_argument('--silhou_path', type=str)
parser.add_argument('--output_path', type=str)
parser.add_argument('--mode', type=int) # 0-combine, 1-rescale depth, 2-resize
args = parser.parse_args()
return args.normal_path, args.depth_path, args.silhou_path, args.output_path, args.mode
if __name__ == '__main__':
f_normal, f_depth, f_silhou, f_output, mode = parse_args()
img_normal = cv2.imread(f_normal)
img_depth = cv2.imread(f_depth)
img_silhou = cv2.imread(f_silhou)
if mode == 0:
pass
elif mode == 1:
img_depth = 255.*(img_depth/255. + 0.2)/1.2
img_depth *= (img_silhou>100)
img_normal *= (img_silhou>100)
    # elif mode == 2:  # resize mode (listed in the --mode help text) is not implemented here
img_combine_depth = np.concatenate([img_depth, img_depth], 1)
img_combine_normal = np.concatenate([img_normal, img_normal], 1)
img_combine_sym = np.concatenate([img_silhou, img_silhou], 1)
img_combine = np.concatenate([img_combine_normal, img_combine_depth, img_combine_sym], 1)
cv2.imwrite(f_output, img_combine)
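# Example invocation (illustrative; the script and file names below are placeholders):
#   python combine_maps.py --normal_path normal.png --depth_path depth.png \
#       --silhou_path silhouette.png --output_path combined.png --mode 1
# Mode 1 rescales the depth map and masks the depth/normal maps with the
# silhouette before the three maps are duplicated and tiled side by side.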
|
the-stack_0_11862 | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The Text REtrieval Conference (TREC) Question Classification dataset."""
from __future__ import absolute_import, division, print_function
import datasets
_CITATION = """\
@inproceedings{li-roth-2002-learning,
title = "Learning Question Classifiers",
author = "Li, Xin and
Roth, Dan",
booktitle = "{COLING} 2002: The 19th International Conference on Computational Linguistics",
year = "2002",
url = "https://www.aclweb.org/anthology/C02-1150",
}
@inproceedings{hovy-etal-2001-toward,
title = "Toward Semantics-Based Answer Pinpointing",
author = "Hovy, Eduard and
Gerber, Laurie and
Hermjakob, Ulf and
Lin, Chin-Yew and
Ravichandran, Deepak",
booktitle = "Proceedings of the First International Conference on Human Language Technology Research",
year = "2001",
url = "https://www.aclweb.org/anthology/H01-1069",
}
"""
_DESCRIPTION = """\
The Text REtrieval Conference (TREC) Question Classification dataset contains 5500 labeled questions in training set and another 500 for test set. The dataset has 6 labels, 47 level-2 labels. Average length of each sentence is 10, vocabulary size of 8700.
Data are collected from four sources: 4,500 English questions published by USC (Hovy et al., 2001), about 500 manually constructed questions for a few rare classes, 894 TREC 8 and TREC 9 questions, and also 500 questions from TREC 10 which serves as the test set.
"""
_URLs = {
"train": "http://cogcomp.org/Data/QA/QC/train_5500.label",
"test": "http://cogcomp.org/Data/QA/QC/TREC_10.label",
}
_COARSE_LABELS = ["DESC", "ENTY", "ABBR", "HUM", "NUM", "LOC"]
_FINE_LABELS = [
"manner",
"cremat",
"animal",
"exp",
"ind",
"gr",
"title",
"def",
"date",
"reason",
"event",
"state",
"desc",
"count",
"other",
"letter",
"religion",
"food",
"country",
"color",
"termeq",
"city",
"body",
"dismed",
"mount",
"money",
"product",
"period",
"substance",
"sport",
"plant",
"techmeth",
"volsize",
"instru",
"abb",
"speed",
"word",
"lang",
"perc",
"code",
"dist",
"temp",
"symbol",
"ord",
"veh",
"weight",
"currency",
]
class Trec(datasets.GeneratorBasedBuilder):
"""TODO: Short description of my dataset."""
VERSION = datasets.Version("1.1.0")
def _info(self):
# TODO: Specifies the datasets.DatasetInfo object
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# datasets.features.FeatureConnectors
features=datasets.Features(
{
"label-coarse": datasets.ClassLabel(names=_COARSE_LABELS),
"label-fine": datasets.ClassLabel(names=_FINE_LABELS),
"text": datasets.Value("string"),
}
),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage="https://cogcomp.seas.upenn.edu/Data/QA/QC/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# TODO: Downloads the data and defines the splits
# dl_manager is a datasets.download.DownloadManager that can be used to
# download and extract URLs
dl_files = dl_manager.download_and_extract(_URLs)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": dl_files["train"],
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"filepath": dl_files["test"],
},
),
]
def _generate_examples(self, filepath):
""" Yields examples. """
# TODO: Yields (key, example) tuples from the dataset
with open(filepath, "rb") as f:
for id_, row in enumerate(f):
# One non-ASCII byte: sisterBADBYTEcity. We replace it with a space
label, _, text = row.replace(b"\xf0", b" ").strip().decode().partition(" ")
coarse_label, _, fine_label = label.partition(":")
yield id_, {
"label-coarse": coarse_label,
"label-fine": fine_label,
"text": text,
}
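# Usage sketch (illustrative, not part of the original script): the builder can be
# loaded directly from a local copy of this file with the `datasets` library.
# The path "path/to/trec.py" is a placeholder for wherever this script is saved.
#   from datasets import load_dataset
#   trec = load_dataset("path/to/trec.py")
#   print(trec["train"][0])  # {'label-coarse': ..., 'label-fine': ..., 'text': '...'}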
|
the-stack_0_11864 | def scrape():
from bs4 import BeautifulSoup
from selenium import webdriver
import pandas as pd
    import urllib.parse
import time
URL_mars_news = "https://mars.nasa.gov/news/"
URL_mars_image = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
URL_mars_weather = "https://twitter.com/marswxreport?lang=en"
URL_mars_facts = "http://space-facts.com/mars/"
URL_mars_hemispheres = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
# navigate through the URLs and save the htmls
driver = webdriver.Firefox()
driver.get(URL_mars_news)
time.sleep(1)
html_mars_news = driver.page_source
driver.get(URL_mars_image)
time.sleep(1)
html_mars_image = driver.page_source
# driver = webdriver.Firefox()
driver.get(URL_mars_weather)
time.sleep(1)
html_mars_weather = driver.page_source
driver.get(URL_mars_hemispheres)
time.sleep(1)
html_mars_hemispheres = driver.page_source
# Grab Mars News
soup = BeautifulSoup(html_mars_news, "html.parser")
mars_latest_news = soup.find("div", class_="list_text")
mars_latest_news_dict = {
"date": mars_latest_news.contents[0].text,
"headline": mars_latest_news.contents[1].text,
"teaser": mars_latest_news.contents[2].text
}
# Grab latest JPL Mars Image
soup = BeautifulSoup(html_mars_image, "html.parser")
mars_image = soup.find_all("a", class_="fancybox")
mars_image_URL = urllib.parse.urljoin("https://www.jpl.nasa.gov", mars_image[1]["data-fancybox-href"])
# Get Mars Weather
soup = BeautifulSoup(html_mars_weather, "html.parser")
mars_weather = soup.find("div", class_="css-901oao r-jwli3a r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0").text
# Scrape Mars Facts Table
dfs = pd.read_html(URL_mars_facts)
mars_facts = dfs[0]
# Grab Mars Hemispheres Images
soup = BeautifulSoup(html_mars_hemispheres, "html.parser")
mars_hemispheres = soup.find_all("div", class_="item")
mars_hemisphere_URLs = []
for item in mars_hemispheres:
# cycle through each hemisphere link and grab the download link for the enhance tif file
mars_hemisphere_link = urllib.parse.urljoin("https://astrogeology.usgs.gov", item.a["href"])
driver.get(mars_hemisphere_link)
html_mars_hemisphere = driver.page_source
soup = BeautifulSoup(html_mars_hemisphere, "html.parser")
mars_hemisphere_download_link = soup.find("div", class_="downloads")
# append URL to list
mars_hemisphere_URLs.append(
{
"title": item.div.a.text,
"img_url": mars_hemisphere_download_link.ul.li.a["href"]
}
)
driver.close()
return {
"news": mars_latest_news_dict,
"image": mars_image_URL,
"weather": mars_weather,
"facts": mars_facts,
"hemispheres": mars_hemisphere_URLs
}
# test code
if __name__ == "__main__":
result = scrape()
    print(result)
|
the-stack_0_11867 | import sys
n, m, *ab = map(int, sys.stdin.read().split())
ab = list(zip(*[iter(ab)] * 2))
root = list(range(n+1)); root[0] = None
height = [0] * (n + 1); height[0] = None
size = [1] * (n + 1); size[0] = None
sys.setrecursionlimit(10 ** 9)
def find_root(v):
u = root[v]
if u == v:
return u
w = find_root(u)
root[v] = w
return w
def unite(v, u):
rv = find_root(v)
ru = find_root(u)
if rv == ru:
return 0
sv = size[rv]
su = size[ru]
    if height[rv] >= height[ru]:
root[ru] = rv
height[rv] = max(height[rv], height[ru] + 1)
size[rv] += size[ru]
else:
root[rv] = ru
size[ru] += size[rv]
return sv * su
def main():
res = [0] * m
for i in range(1, m):
res[i] = res[i-1] + unite(*ab[m-i])
all_pairs = n * (n - 1) // 2
for i in res[::-1]:
yield all_pairs - i
if __name__ == '__main__':
ans = main()
print(*ans, sep='\n')
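# Worked example (input format inferred from the code: "n m" followed by m edge lines "a b"):
#   stdin : 3 3 / 1 2 / 2 3 / 1 3
#   stdout: 0, 2, 3 (one value per line)
# The i-th printed value is the number of vertex pairs that become disconnected after
# removing the first i edges in input order; edges are re-added in reverse with the
# union-find above, so each answer is all_pairs minus the accumulated merged-pair count.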
|
the-stack_0_11868 | # Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import recirq.quantum_chess.enums as enums
_ORD_A = ord('a')
def to_rank(x: int) -> str:
"""Returns the algebraic notation rank from the x coordinate."""
return chr(_ORD_A + x)
def to_square(x: int, y: int) -> str:
"""Returns the algebraic notation of a square."""
return chr(_ORD_A + x) + str(y + 1)
def x_of(square: str) -> int:
"""Returns x coordinate of an algebraic notation square (e.g. 'f4')."""
return ord(square[0]) - _ORD_A
def y_of(square: str) -> int:
"""Returns y coordinate of an algebraic notation square (e.g. 'f4')."""
return int(square[1]) - 1
class Move:
"""Container class that has the source and target of a quantum chess move.
If the move is a split move, it will have a target2. If a merge move,
it will have a source2 attribute.
For moves that are input from the quantum chess board API, this will
have a move type and variant that determines what kind of move this is
(capture, exclusion, etc).
"""
def __init__(self,
source: str,
target: str,
*,
source2: str = None,
target2: str = None,
move_type: enums.MoveType = None,
move_variant: enums.MoveVariant = None):
self.source = source
self.source2 = source2
self.target = target
self.target2 = target2
self.move_type = move_type
self.move_variant = move_variant
def __eq__(self, other):
if isinstance(other, Move):
return (self.source == other.source and
self.target == other.target and
self.target2 == other.target2)
return False
@classmethod
def from_string(cls, str_to_parse: str):
"""Creates a move from a string shorthand for tests.
Format=source,target,target2,source2:type:variant
with commas omitted.
        If source2 is specified (a merge move), then the target2 slot
        should hold the placeholder '--'.
Examples:
'a1a2:JUMP:BASIC'
'b1a3c3:SPLIT_JUMP:BASIC'
'a3b1--c3:MERGE_JUMP:BASIC'
"""
fields = str_to_parse.split(':')
if len(fields) != 3:
raise ValueError(f'Invalid move string {str_to_parse}')
source = fields[0][0:2]
target = fields[0][2:4]
move_type = enums.MoveType[fields[1]]
move_variant = enums.MoveVariant[fields[2]]
if len(fields[0]) <= 4:
return cls(source,
target,
move_type=move_type,
move_variant=move_variant)
if len(fields[0]) <= 6:
return cls(source,
target,
target2=fields[0][4:6],
move_type=move_type,
move_variant=move_variant)
return cls(source,
target,
source2=fields[0][6:8],
move_type=move_type,
move_variant=move_variant)
def is_split_move(self) -> bool:
return self.target2 is not None
def __str__(self):
if self.is_split_move():
return self.source + '^' + self.target + self.target2
return self.source + self.target
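if __name__ == '__main__':
    # Small usage sketch (illustrative addition): exercises the shorthand parser
    # documented in Move.from_string. Running it assumes recirq is installed so
    # that the MoveType/MoveVariant enum members referenced below resolve.
    split = Move.from_string('b1a3c3:SPLIT_JUMP:BASIC')
    print(split.is_split_move(), split)        # True b1^a3c3
    merge = Move.from_string('a3b1--c3:MERGE_JUMP:BASIC')
    print(merge.source2, merge)                # c3 a3b1
    print(to_square(x_of('f4'), y_of('f4')))   # f4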
|
the-stack_0_11869 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# This file has a special meaning for pytest. See https://docs.pytest.org/en/2.7.3/plugins.html for
# additional details.
import json
import logging
import os
import random
import re
from pathlib import Path
from shutil import copyfile
from traceback import format_tb
import boto3
import pkg_resources
import pytest
import yaml
from cfn_stacks_factory import CfnStack, CfnStacksFactory
from clusters_factory import Cluster, ClustersFactory
from conftest_markers import (
DIMENSIONS_MARKER_ARGS,
add_default_markers,
check_marker_dimensions,
check_marker_list,
check_marker_skip_dimensions,
check_marker_skip_list,
)
from conftest_tests_config import apply_cli_dimensions_filtering, parametrize_from_config, remove_disabled_tests
from constants import SCHEDULERS_SUPPORTING_IMDS_SECURED
from filelock import FileLock
from framework.credential_providers import aws_credential_provider, register_cli_credentials_for_region
from framework.tests_configuration.config_renderer import read_config_file
from framework.tests_configuration.config_utils import get_all_regions
from images_factory import Image, ImagesFactory
from jinja2 import Environment, FileSystemLoader
from network_template_builder import Gateways, NetworkTemplateBuilder, SubnetConfig, VPCConfig
from retrying import retry
from utils import (
InstanceTypesData,
create_s3_bucket,
delete_s3_bucket,
dict_add_nested_key,
dict_has_nested_key,
generate_stack_name,
get_architecture_supported_by_instance_type,
get_arn_partition,
get_instance_info,
get_network_interfaces_count,
get_vpc_snakecase_value,
random_alphanumeric,
set_logger_formatter,
)
from tests.common.utils import (
get_installed_parallelcluster_version,
get_sts_endpoint,
retrieve_pcluster_ami_without_standard_naming,
)
def pytest_addoption(parser):
"""Register argparse-style options and ini-style config values, called once at the beginning of a test run."""
parser.addoption("--tests-config-file", help="config file to specify tests and dimensions")
parser.addoption("--regions", help="aws region where tests are executed", nargs="*")
parser.addoption("--instances", help="aws instances under test", nargs="*")
parser.addoption("--oss", help="OSs under test", nargs="*")
parser.addoption("--schedulers", help="schedulers under test", nargs="*")
parser.addoption("--tests-log-file", help="file used to write test logs", default="pytest.log")
parser.addoption("--output-dir", help="output dir for tests artifacts")
# Can't mark fields as required due to: https://github.com/pytest-dev/pytest/issues/2026
parser.addoption("--key-name", help="key to use for EC2 instances", type=str)
parser.addoption("--key-path", help="key path to use for SSH connections", type=str)
parser.addoption("--custom-chef-cookbook", help="url to a custom cookbook package")
parser.addoption(
"--createami-custom-chef-cookbook", help="url to a custom cookbook package for the createami command"
)
parser.addoption("--pcluster-git-ref", help="Git ref of the custom cli package used to build the AMI.")
parser.addoption("--cookbook-git-ref", help="Git ref of the custom cookbook package used to build the AMI.")
parser.addoption("--node-git-ref", help="Git ref of the custom node package used to build the AMI.")
parser.addoption(
"--ami-owner",
help="Override the owner value when fetching AMIs to use with cluster. By default pcluster uses amazon.",
)
parser.addoption("--createami-custom-node-package", help="url to a custom node package for the createami command")
parser.addoption("--custom-awsbatch-template-url", help="url to a custom awsbatch template")
parser.addoption("--cw-dashboard-template-url", help="url to a custom Dashboard cfn template")
parser.addoption("--custom-awsbatchcli-package", help="url to a custom awsbatch cli package")
parser.addoption("--custom-node-package", help="url to a custom node package")
parser.addoption("--custom-ami", help="custom AMI to use in the tests")
parser.addoption("--pre-install", help="url to pre install script")
parser.addoption("--post-install", help="url to post install script")
parser.addoption("--vpc-stack", help="Name of an existing vpc stack.")
parser.addoption("--cluster", help="Use an existing cluster instead of creating one.")
parser.addoption("--public-ecr-image-uri", help="S3 URI of the ParallelCluster API spec")
parser.addoption(
"--api-definition-s3-uri", help="URI of the Docker image for the Lambda of the ParallelCluster API"
)
parser.addoption(
"--api-infrastructure-s3-uri", help="URI of the CloudFormation template for the ParallelCluster API"
)
parser.addoption("--api-uri", help="URI of an existing ParallelCluster API")
parser.addoption("--instance-types-data-file", help="JSON file with additional instance types data")
parser.addoption(
"--credential", help="STS credential endpoint, in the format <region>,<endpoint>,<ARN>,<externalId>.", nargs="+"
)
parser.addoption(
"--no-delete", action="store_true", default=False, help="Don't delete stacks after tests are complete."
)
parser.addoption("--benchmarks-target-capacity", help="set the target capacity for benchmarks tests", type=int)
parser.addoption("--benchmarks-max-time", help="set the max waiting time in minutes for benchmarks tests", type=int)
parser.addoption("--stackname-suffix", help="set a suffix in the integration tests stack names")
parser.addoption(
"--delete-logs-on-success", help="delete CloudWatch logs when a test succeeds", action="store_true"
)
parser.addoption(
"--use-default-iam-credentials",
help="use default IAM creds when running pcluster commands",
action="store_true",
)
def pytest_generate_tests(metafunc):
"""Generate (multiple) parametrized calls to a test function."""
if metafunc.config.getoption("tests_config", None):
parametrize_from_config(metafunc)
else:
_parametrize_from_option(metafunc, "region", "regions")
_parametrize_from_option(metafunc, "instance", "instances")
_parametrize_from_option(metafunc, "os", "oss")
_parametrize_from_option(metafunc, "scheduler", "schedulers")
def pytest_configure(config):
"""This hook is called for every plugin and initial conftest file after command line options have been parsed."""
# read tests config file if used
if config.getoption("tests_config_file", None):
config.option.tests_config = read_config_file(config.getoption("tests_config_file"))
# Read instance types data file if used
if config.getoption("instance_types_data_file", None):
# Load additional instance types data
InstanceTypesData.load_additional_instance_types_data(config.getoption("instance_types_data_file"))
config.option.instance_types_data = InstanceTypesData.additional_instance_types_data
# register additional markers
config.addinivalue_line("markers", "instances(instances_list): run test only against the listed instances.")
config.addinivalue_line("markers", "regions(regions_list): run test only against the listed regions")
config.addinivalue_line("markers", "oss(os_list): run test only against the listed oss")
config.addinivalue_line("markers", "schedulers(schedulers_list): run test only against the listed schedulers")
config.addinivalue_line(
"markers", "dimensions(region, instance, os, scheduler): run test only against the listed dimensions"
)
config.addinivalue_line("markers", "skip_instances(instances_list): skip test for the listed instances")
config.addinivalue_line("markers", "skip_regions(regions_list): skip test for the listed regions")
config.addinivalue_line("markers", "skip_oss(os_list): skip test for the listed oss")
config.addinivalue_line("markers", "skip_schedulers(schedulers_list): skip test for the listed schedulers")
config.addinivalue_line(
"markers", "skip_dimensions(region, instance, os, scheduler): skip test for the listed dimensions"
)
_setup_custom_logger(config.getoption("tests_log_file"))
def pytest_sessionstart(session):
# The number of seconds before a connection to the instance metadata service should time out.
# When attempting to retrieve credentials on an Amazon EC2 instance that is configured with an IAM role,
# a connection to the instance metadata service will time out after 1 second by default. If you know you're
# running on an EC2 instance with an IAM role configured, you can increase this value if needed.
os.environ["AWS_METADATA_SERVICE_TIMEOUT"] = "5"
# When attempting to retrieve credentials on an Amazon EC2 instance that has been configured with an IAM role,
# Boto3 will make only one attempt to retrieve credentials from the instance metadata service before giving up.
# If you know your code will be running on an EC2 instance, you can increase this value to make Boto3 retry
# multiple times before giving up.
os.environ["AWS_METADATA_SERVICE_NUM_ATTEMPTS"] = "5"
# Increasing default max attempts retry
os.environ["AWS_MAX_ATTEMPTS"] = "10"
def pytest_runtest_call(item):
"""Called to execute the test item."""
set_logger_formatter(
logging.Formatter(fmt=f"%(asctime)s - %(levelname)s - %(process)d - {item.name} - %(module)s - %(message)s")
)
logging.info("Running test " + item.name)
def pytest_runtest_logfinish(nodeid, location):
set_logger_formatter(logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(process)d - %(module)s - %(message)s"))
def pytest_collection_modifyitems(session, config, items):
"""Called after collection has been performed, may filter or re-order the items in-place."""
if config.getoption("tests_config", None):
# Remove tests not declared in config file from the collected ones
remove_disabled_tests(session, config, items)
# Apply filtering based on dimensions passed as CLI options
# ("--regions", "--instances", "--oss", "--schedulers")
apply_cli_dimensions_filtering(config, items)
else:
add_default_markers(items)
check_marker_list(items, "instances", "instance")
check_marker_list(items, "regions", "region")
check_marker_list(items, "oss", "os")
check_marker_list(items, "schedulers", "scheduler")
check_marker_skip_list(items, "skip_instances", "instance")
check_marker_skip_list(items, "skip_regions", "region")
check_marker_skip_list(items, "skip_oss", "os")
check_marker_skip_list(items, "skip_schedulers", "scheduler")
check_marker_dimensions(items)
check_marker_skip_dimensions(items)
_add_filename_markers(items, config)
def pytest_collection_finish(session):
_log_collected_tests(session)
def _log_collected_tests(session):
from xdist import get_xdist_worker_id
# Write collected tests in a single worker
# get_xdist_worker_id returns the id of the current worker ('gw0', 'gw1', etc) or 'master'
if get_xdist_worker_id(session) in ["master", "gw0"]:
collected_tests = list(map(lambda item: item.nodeid, session.items))
logging.info(
"Collected tests in regions %s (total=%d):\n%s",
session.config.getoption("regions") or get_all_regions(session.config.getoption("tests_config")),
len(session.items),
json.dumps(collected_tests, indent=2),
)
out_dir = session.config.getoption("output_dir")
with open(f"{out_dir}/collected_tests.txt", "a", encoding="utf-8") as out_f:
out_f.write("\n".join(collected_tests))
out_f.write("\n")
def pytest_exception_interact(node, call, report):
"""Called when an exception was raised which can potentially be interactively handled.."""
logging.error(
"Exception raised while executing %s: %s\n%s",
node.name,
call.excinfo.value,
"".join(format_tb(call.excinfo.tb)),
)
def _extract_tested_component_from_filename(item):
"""Extract portion of test item's filename identifying the component it tests."""
test_location = os.path.splitext(os.path.basename(item.location[0]))[0]
return re.sub(r"test_|_test", "", test_location)
def _add_filename_markers(items, config):
"""Add a marker based on the name of the file where the test case is defined."""
for item in items:
marker = _extract_tested_component_from_filename(item)
# This dynamically registers markers in pytest so that warning for the usage of undefined markers are not
# displayed
config.addinivalue_line("markers", marker)
item.add_marker(marker)
def _parametrize_from_option(metafunc, test_arg_name, option_name):
if test_arg_name in metafunc.fixturenames:
metafunc.parametrize(test_arg_name, metafunc.config.getoption(option_name), scope="class")
def _setup_custom_logger(log_file):
formatter = logging.Formatter(fmt="%(asctime)s - %(levelname)s - %(process)d - %(module)s - %(message)s")
logger = logging.getLogger()
logger.handlers = []
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger.setLevel(logging.INFO)
logger.addHandler(console_handler)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def _add_properties_to_report(item):
props = []
# Add properties for test dimensions, obtained from fixtures passed to tests
for dimension in DIMENSIONS_MARKER_ARGS:
value = item.funcargs.get(dimension)
if value:
props.append((dimension, value))
# Add property for feature tested, obtained from filename containing the test
props.append(("feature", _extract_tested_component_from_filename(item)))
for dimension_value_pair in props:
if dimension_value_pair not in item.user_properties:
item.user_properties.append(dimension_value_pair)
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("setup_credentials")
def clusters_factory(request, region):
"""
Define a fixture to manage the creation and destruction of clusters.
The configs used to create clusters are dumped to output_dir/clusters_configs/{test_name}.config
"""
factory = ClustersFactory(delete_logs_on_success=request.config.getoption("delete_logs_on_success"))
def _cluster_factory(cluster_config, upper_case_cluster_name=False, **kwargs):
cluster_config = _write_config_to_outdir(request, cluster_config, "clusters_configs")
cluster = Cluster(
name=request.config.getoption("cluster")
if request.config.getoption("cluster")
else "integ-tests-{0}{1}{2}".format(
random_alphanumeric().upper() if upper_case_cluster_name else random_alphanumeric(),
"-" if request.config.getoption("stackname_suffix") else "",
request.config.getoption("stackname_suffix"),
),
config_file=cluster_config,
ssh_key=request.config.getoption("key_path"),
region=region,
)
if not request.config.getoption("cluster"):
cluster.creation_response = factory.create_cluster(cluster, **kwargs)
return cluster
yield _cluster_factory
if not request.config.getoption("no_delete"):
try:
test_passed = request.node.rep_call.passed
except AttributeError:
test_passed = False
factory.destroy_all_clusters(test_passed=test_passed)
@pytest.fixture(scope="session")
def api_server_factory(
cfn_stacks_factory, request, public_ecr_image_uri, api_definition_s3_uri, api_infrastructure_s3_uri
):
"""Creates a factory for deploying API servers on-demand to each region."""
api_servers = {}
def _api_server_factory(server_region):
api_stack_name = generate_stack_name("integ-tests-api", request.config.getoption("stackname_suffix"))
params = [
{"ParameterKey": "EnableIamAdminAccess", "ParameterValue": "true"},
{"ParameterKey": "CreateApiUserRole", "ParameterValue": "false"},
]
if api_definition_s3_uri:
params.append({"ParameterKey": "ApiDefinitionS3Uri", "ParameterValue": api_definition_s3_uri})
if public_ecr_image_uri:
params.append({"ParameterKey": "PublicEcrImageUri", "ParameterValue": public_ecr_image_uri})
template = (
api_infrastructure_s3_uri
or f"https://{server_region}-aws-parallelcluster.s3.{server_region}.amazonaws.com"
f"{'.cn' if server_region.startswith('cn') else ''}"
f"/parallelcluster/{get_installed_parallelcluster_version()}/api/parallelcluster-api.yaml"
)
if server_region not in api_servers:
logging.info(f"Creating API Server stack: {api_stack_name} in {server_region} with template {template}")
stack = CfnStack(
name=api_stack_name,
region=server_region,
parameters=params,
capabilities=["CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"],
template=template,
)
cfn_stacks_factory.create_stack(stack)
api_servers[server_region] = stack
else:
logging.info(f"Found cached API Server stack: {api_stack_name} in {server_region}")
return api_servers[server_region]
yield _api_server_factory
@pytest.fixture(scope="class")
def api_client(region, api_server_factory, api_uri):
"""Define a fixture for an API client that interacts with the pcluster api."""
from pcluster_client import ApiClient, Configuration
if api_uri:
host = api_uri
else:
stack = api_server_factory(region)
host = stack.cfn_outputs["ParallelClusterApiInvokeUrl"]
api_configuration = Configuration(host=host)
api_configuration.retries = 3
with ApiClient(api_configuration) as api_client_instance:
yield api_client_instance
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("setup_credentials")
def images_factory(request):
"""
Define a fixture to manage the creation and destruction of images.
    The configs used to create images are dumped to output_dir/image_configs/{test_name}.config
"""
factory = ImagesFactory()
def _image_factory(image_id, image_config, region, **kwargs):
image_config_file = _write_config_to_outdir(request, image_config, "image_configs")
image = Image(
image_id="-".join([image_id, request.config.getoption("stackname_suffix")])
if request.config.getoption("stackname_suffix")
else image_id,
config_file=image_config_file,
region=region,
)
factory.create_image(image, **kwargs)
if image.image_status != "BUILD_IN_PROGRESS" and kwargs.get("log_error", False):
logging.error("image %s creation failed", image_id)
return image
yield _image_factory
factory.destroy_all_images()
def _write_config_to_outdir(request, config, config_dir):
out_dir = request.config.getoption("output_dir")
# Sanitize config file name to make it Windows compatible
# request.node.nodeid example:
# 'dcv/test_dcv.py::test_dcv_configuration[eu-west-1-c5.xlarge-centos7-slurm-8443-0.0.0.0/0-/shared]'
test_file, test_name = request.node.nodeid.split("::", 1)
config_file_name = "{0}-{1}".format(test_file, test_name.replace("/", "_"))
os.makedirs(
"{out_dir}/{config_dir}/{test_dir}".format(
out_dir=out_dir, config_dir=config_dir, test_dir=os.path.dirname(test_file)
),
exist_ok=True,
)
config_dst = "{out_dir}/{config_dir}/{config_file_name}.config".format(
out_dir=out_dir, config_dir=config_dir, config_file_name=config_file_name
)
copyfile(config, config_dst)
return config_dst
@pytest.fixture()
def test_datadir(request, datadir):
"""
Inject the datadir with resources for the specific test function.
If the test function is declared in a class then datadir is ClassName/FunctionName
otherwise it is only FunctionName.
"""
function_name = request.function.__name__
if not request.cls:
return datadir / function_name
class_name = request.cls.__name__
return datadir / "{0}/{1}".format(class_name, function_name)
@pytest.fixture()
def pcluster_config_reader(test_datadir, vpc_stack, request, region):
"""
Define a fixture to render pcluster config templates associated to the running test.
    The config for a given test is a pcluster.config.yaml file stored in the test_datadir folder.
    The config can be written using the Jinja2 template engine.
    The current renderer already replaces placeholders for the following keys:
{{ region }}, {{ os }}, {{ instance }}, {{ scheduler}}, {{ key_name }},
{{ vpc_id }}, {{ public_subnet_id }}, {{ private_subnet_id }}
The current renderer injects options for custom templates and packages in case these
are passed to the cli and not present already in the cluster config.
Also sanity_check is set to true by default unless explicitly set in config.
:return: a _config_renderer(**kwargs) function which gets as input a dictionary of values to replace in the template
"""
def _config_renderer(config_file="pcluster.config.yaml", **kwargs):
config_file_path = test_datadir / config_file
if not os.path.isfile(config_file_path):
raise FileNotFoundError(f"Cluster config file not found in the expected dir {config_file_path}")
default_values = _get_default_template_values(vpc_stack, request)
file_loader = FileSystemLoader(str(test_datadir))
env = Environment(loader=file_loader)
rendered_template = env.get_template(config_file).render(**{**default_values, **kwargs})
config_file_path.write_text(rendered_template)
if not config_file.endswith("image.config.yaml"):
inject_additional_config_settings(config_file_path, request, region)
else:
inject_additional_image_configs_settings(config_file_path, request)
return config_file_path
return _config_renderer
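# Illustrative (hypothetical) test showing how the fixtures above are typically combined;
# the test name and the `scaledown_idletime` parameter are placeholders, not taken from
# a real test module:
#
#   def test_example(pcluster_config_reader, clusters_factory):
#       cluster_config = pcluster_config_reader(scaledown_idletime=3)
#       cluster = clusters_factory(cluster_config)
#       ...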
def inject_additional_image_configs_settings(image_config, request):
with open(image_config, encoding="utf-8") as conf_file:
config_content = yaml.load(conf_file, Loader=yaml.SafeLoader)
if request.config.getoption("createami_custom_chef_cookbook") and not dict_has_nested_key(
config_content, ("DevSettings", "Cookbook", "ChefCookbook")
):
dict_add_nested_key(
config_content,
request.config.getoption("createami_custom_chef_cookbook"),
("DevSettings", "Cookbook", "ChefCookbook"),
)
for option, config_param in [
("custom_awsbatchcli_package", "AwsBatchCliPackage"),
("createami_custom_node_package", "NodePackage"),
]:
if request.config.getoption(option) and not dict_has_nested_key(config_content, ("DevSettings", config_param)):
dict_add_nested_key(config_content, request.config.getoption(option), ("DevSettings", config_param))
with open(image_config, "w", encoding="utf-8") as conf_file:
yaml.dump(config_content, conf_file)
def inject_additional_config_settings(cluster_config, request, region): # noqa C901
with open(cluster_config, encoding="utf-8") as conf_file:
config_content = yaml.safe_load(conf_file)
if request.config.getoption("custom_chef_cookbook") and not dict_has_nested_key(
config_content, ("DevSettings", "Cookbook", "ChefCookbook")
):
dict_add_nested_key(
config_content,
request.config.getoption("custom_chef_cookbook"),
("DevSettings", "Cookbook", "ChefCookbook"),
)
if request.config.getoption("custom_ami") and not dict_has_nested_key(config_content, ("Image", "CustomAmi")):
dict_add_nested_key(config_content, request.config.getoption("custom_ami"), ("Image", "CustomAmi"))
if not dict_has_nested_key(config_content, ("DevSettings", "AmiSearchFilters")):
if (
request.config.getoption("pcluster_git_ref")
or request.config.getoption("cookbook_git_ref")
or request.config.getoption("node_git_ref")
):
tags = []
if request.config.getoption("pcluster_git_ref"):
tags.append(
{"Key": "build:parallelcluster:cli_ref", "Value": request.config.getoption("pcluster_git_ref")}
)
if request.config.getoption("cookbook_git_ref"):
tags.append(
{"Key": "build:parallelcluster:cookbook_ref", "Value": request.config.getoption("cookbook_git_ref")}
)
if request.config.getoption("node_git_ref"):
tags.append(
{"Key": "build:parallelcluster:node_ref", "Value": request.config.getoption("node_git_ref")}
)
tags.append({"Key": "parallelcluster:build_status", "Value": "available"})
dict_add_nested_key(config_content, tags, ("DevSettings", "AmiSearchFilters", "Tags"))
if request.config.getoption("ami_owner"):
dict_add_nested_key(
config_content, request.config.getoption("ami_owner"), ("DevSettings", "AmiSearchFilters", "Owner")
)
# Additional instance types data is copied it into config files to make it available at cluster creation
instance_types_data = request.config.getoption("instance_types_data", None)
if instance_types_data:
dict_add_nested_key(config_content, json.dumps(instance_types_data), ("DevSettings", "InstanceTypesData"))
for option, config_param in [("pre_install", "OnNodeStart"), ("post_install", "OnNodeConfigured")]:
if request.config.getoption(option):
if not dict_has_nested_key(config_content, ("HeadNode", "CustomActions", config_param)):
dict_add_nested_key(
config_content,
request.config.getoption(option),
("HeadNode", "CustomActions", config_param, "Script"),
)
_add_policy_for_pre_post_install(config_content["HeadNode"], option, request, region)
scheduler = config_content["Scheduling"]["Scheduler"]
if scheduler != "awsbatch":
for queue in config_content["Scheduling"][f"{scheduler.capitalize()}Queues"]:
if not dict_has_nested_key(queue, ("CustomActions", config_param)):
dict_add_nested_key(
queue, request.config.getoption(option), ("CustomActions", config_param, "Script")
)
_add_policy_for_pre_post_install(queue, option, request, region)
for option, config_param in [
("custom_awsbatchcli_package", "AwsBatchCliPackage"),
("custom_node_package", "NodePackage"),
]:
if request.config.getoption(option) and not dict_has_nested_key(config_content, ("DevSettings", config_param)):
dict_add_nested_key(config_content, request.config.getoption(option), ("DevSettings", config_param))
with open(cluster_config, "w", encoding="utf-8") as conf_file:
yaml.dump(config_content, conf_file)
def _add_policy_for_pre_post_install(node_config, custom_option, request, region):
match = re.match(r"s3://(.*?)/(.*)", request.config.getoption(custom_option))
if not match or len(match.groups()) < 2:
logging.info("{0} script is not an S3 URL".format(custom_option))
else:
additional_iam_policies = {"Policy": f"arn:{get_arn_partition(region)}:iam::aws:policy/AmazonS3ReadOnlyAccess"}
if dict_has_nested_key(node_config, ("Iam", "InstanceRole")) or dict_has_nested_key(
node_config, ("Iam", "InstanceProfile")
):
# AdditionalIamPolicies, InstanceRole or InstanceProfile can not co-exist
logging.info(
"InstanceRole/InstanceProfile is specified, "
f"skipping insertion of AdditionalIamPolicies: {additional_iam_policies}"
)
else:
logging.info(
f"{custom_option} script is an S3 URL, adding AdditionalIamPolicies: {additional_iam_policies}"
)
if dict_has_nested_key(node_config, ("Iam", "AdditionalIamPolicies")):
if additional_iam_policies not in node_config["Iam"]["AdditionalIamPolicies"]:
node_config["Iam"]["AdditionalIamPolicies"].append(additional_iam_policies)
else:
dict_add_nested_key(node_config, [additional_iam_policies], ("Iam", "AdditionalIamPolicies"))
def _get_default_template_values(vpc_stack, request):
"""Build a dictionary of default values to inject in the jinja templated cluster configs."""
default_values = get_vpc_snakecase_value(vpc_stack)
default_values.update({dimension: request.node.funcargs.get(dimension) for dimension in DIMENSIONS_MARKER_ARGS})
default_values["key_name"] = request.config.getoption("key_name")
scheduler = request.node.funcargs.get("scheduler")
default_values["imds_secured"] = scheduler in SCHEDULERS_SUPPORTING_IMDS_SECURED
return default_values
@pytest.fixture(scope="session")
def cfn_stacks_factory(request):
"""Define a fixture to manage the creation and destruction of CloudFormation stacks."""
factory = CfnStacksFactory(request.config.getoption("credential"))
yield factory
if not request.config.getoption("no_delete"):
factory.delete_all_stacks()
else:
logging.warning("Skipping deletion of CFN stacks because --no-delete option is set")
@pytest.fixture()
@pytest.mark.usefixtures("setup_credentials")
def parameterized_cfn_stacks_factory(request):
"""Define a fixture that returns a parameterized stack factory and manages the stack creation and deletion."""
factory = CfnStacksFactory(request.config.getoption("credential"))
def _create_stack(region, template_path, stack_prefix="", parameters=None, capabilities=None):
file_content = extract_template(template_path)
stack = CfnStack(
name=generate_stack_name(stack_prefix, request.config.getoption("stackname_suffix")),
region=region,
template=file_content,
parameters=parameters or [],
capabilities=capabilities or [],
)
factory.create_stack(stack)
return stack
def extract_template(template_path):
with open(template_path, encoding="utf-8") as cfn_file:
file_content = cfn_file.read()
return file_content
yield _create_stack
factory.delete_all_stacks()
AVAILABILITY_ZONE_OVERRIDES = {
# c5.xlarge is not supported in use1-az3
# FSx Lustre file system creation is currently not supported for use1-az3
# m6g.xlarge is not supported in use1-az2 or use1-az3
# p4d.24xlarge is only available on use1-az2
"us-east-1": ["use1-az2"],
# some instance type is only supported in use2-az2
"us-east-2": ["use2-az2"],
# c4.xlarge is not supported in usw2-az4
# p4d.24xlarge is only available on uw2-az2
"us-west-2": ["usw2-az2"],
# c5.xlarge is not supported in apse2-az3
"ap-southeast-2": ["apse2-az1", "apse2-az2"],
# m6g.xlarge is not supported in apne1-az2
"ap-northeast-1": ["apne1-az4", "apne1-az1"],
# c4.xlarge is not supported in apne2-az2
"ap-northeast-2": ["apne2-az1", "apne2-az3"],
# c5.xlarge is not supported in apse1-az3
"ap-southeast-1": ["apse1-az2", "apse1-az1"],
# c4.xlarge is not supported in aps1-az2
"ap-south-1": ["aps1-az1", "aps1-az3"],
# NAT Gateway not available in sae1-az2 , c5n.18xlarge is not supported in sae1-az3
"sa-east-1": ["sae1-az1"],
# m6g.xlarge instances not available in euw1-az3
"eu-west-1": ["euw1-az1", "euw1-az2"],
# io2 EBS volumes not available in cac1-az4
"ca-central-1": ["cac1-az1", "cac1-az2"],
# instance can only be launch in placement group in eun1-az2
"eu-north-1": ["eun1-az2"],
# g3.8xlarge is not supported in euc1-az1
"eu-central-1": ["euc1-az2", "euc1-az3"],
# FSx not available in cnn1-az4
"cn-north-1": ["cnn1-az1", "cnn1-az2"],
}
@pytest.fixture(scope="function")
def random_az_selector(request):
"""Select random AZs for a given region."""
def _get_random_availability_zones(region, num_azs=1, default_value=None):
"""Return num_azs random AZs (in the form of AZ names, e.g. 'us-east-1a') for the given region."""
az_ids = AVAILABILITY_ZONE_OVERRIDES.get(region, [])
if az_ids:
az_id_to_az_name_map = get_az_id_to_az_name_map(region, request.config.getoption("credential"))
sample = random.sample([az_id_to_az_name_map.get(az_id, default_value) for az_id in az_ids], k=num_azs)
else:
sample = [default_value] * num_azs
return sample[0] if num_azs == 1 else sample
return _get_random_availability_zones
@pytest.fixture(scope="class", autouse=True)
def setup_credentials(region, request):
"""Setup environment for the integ tests"""
with aws_credential_provider(region, request.config.getoption("credential")):
yield
# FixMe: double check if this fixture introduce unnecessary implication.
# The alternative way is to use --region for all cluster operations.
@pytest.fixture(scope="class", autouse=True)
def setup_env_variable(region):
"""Setup environment for the integ tests"""
os.environ["AWS_DEFAULT_REGION"] = region
yield
del os.environ["AWS_DEFAULT_REGION"]
def get_az_id_to_az_name_map(region, credential):
"""Return a dict mapping AZ IDs (e.g, 'use1-az2') to AZ names (e.g., 'us-east-1c')."""
# credentials are managed manually rather than via setup_sts_credentials because this function
# is called by a session-scoped fixture, which cannot make use of a class-scoped fixture.
with aws_credential_provider(region, credential):
ec2_client = boto3.client("ec2", region_name=region)
return {
entry.get("ZoneId"): entry.get("ZoneName")
for entry in ec2_client.describe_availability_zones().get("AvailabilityZones")
}
def get_availability_zones(region, credential):
"""
Return a list of availability zones for the given region.
    Note that this function is called by the vpc_stacks fixture. Because vpc_stacks is session-scoped,
it cannot utilize setup_sts_credentials, which is required in opt-in regions in order to call
describe_availability_zones.
"""
az_list = []
with aws_credential_provider(region, credential):
client = boto3.client("ec2", region_name=region)
response_az = client.describe_availability_zones(
Filters=[
{"Name": "region-name", "Values": [str(region)]},
{"Name": "zone-type", "Values": ["availability-zone"]},
]
)
for az in response_az.get("AvailabilityZones"):
az_list.append(az.get("ZoneName"))
return az_list
@pytest.fixture(scope="session", autouse=True)
def initialize_cli_creds(cfn_stacks_factory, request):
if request.config.getoption("use_default_iam_credentials"):
logging.info("Using default IAM credentials to run pcluster commands")
return
regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config"))
for region in regions:
logging.info("Creating IAM roles for pcluster CLI")
stack_name = generate_stack_name("integ-tests-iam-user-role", request.config.getoption("stackname_suffix"))
stack_template_path = os.path.join("..", "iam_policies", "user-role.cfn.yaml")
with open(stack_template_path, encoding="utf-8") as stack_template_file:
stack_template_data = stack_template_file.read()
stack = CfnStack(name=stack_name, region=region, capabilities=["CAPABILITY_IAM"], template=stack_template_data)
cfn_stacks_factory.create_stack(stack)
# register providers
register_cli_credentials_for_region(region, stack.cfn_outputs["ParallelClusterUserRole"])
@pytest.fixture(scope="session", autouse=True)
def vpc_stacks(cfn_stacks_factory, request):
"""Create VPC used by integ tests in all configured regions."""
regions = request.config.getoption("regions") or get_all_regions(request.config.getoption("tests_config"))
vpc_stacks = {}
for region in regions:
# Creating private_subnet_different_cidr in a different AZ for test_efs
# To-do: isolate this logic and create a compute subnet in different AZ than head node in test_efs
# if region has a non-empty list in AVAILABILITY_ZONE_OVERRIDES, select a subset of those AZs
credential = request.config.getoption("credential")
az_ids_for_region = AVAILABILITY_ZONE_OVERRIDES.get(region, [])
if az_ids_for_region:
az_id_to_az_name = get_az_id_to_az_name_map(region, credential)
az_names = [az_id_to_az_name.get(az_id) for az_id in az_ids_for_region]
# if only one AZ can be used for the given region, use it multiple times
if len(az_names) == 1:
az_names *= 2
availability_zones = random.sample(az_names, k=2)
# otherwise, select a subset of all AZs in the region
else:
az_list = get_availability_zones(region, credential)
# if number of available zones is smaller than 2, available zones should be [None, None]
if len(az_list) < 2:
availability_zones = [None, None]
else:
availability_zones = random.sample(az_list, k=2)
# Subnets visual representation:
# http://www.davidc.net/sites/default/subnets/subnets.html?network=192.168.0.0&mask=16&division=7.70
public_subnet = SubnetConfig(
name="Public",
cidr="192.168.32.0/19", # 8190 IPs
map_public_ip_on_launch=True,
has_nat_gateway=True,
availability_zone=availability_zones[0],
default_gateway=Gateways.INTERNET_GATEWAY,
)
private_subnet = SubnetConfig(
name="Private",
cidr="192.168.64.0/18", # 16382 IPs
map_public_ip_on_launch=False,
has_nat_gateway=False,
availability_zone=availability_zones[0],
default_gateway=Gateways.NAT_GATEWAY,
)
private_subnet_different_cidr = SubnetConfig(
name="PrivateAdditionalCidr",
cidr="192.168.128.0/17", # 32766 IPs
map_public_ip_on_launch=False,
has_nat_gateway=False,
availability_zone=availability_zones[1],
default_gateway=Gateways.NAT_GATEWAY,
)
no_internet_subnet = SubnetConfig(
name="NoInternet",
cidr="192.168.16.0/20", # 4094 IPs
map_public_ip_on_launch=False,
has_nat_gateway=False,
availability_zone=availability_zones[0],
default_gateway=Gateways.NONE,
)
vpc_config = VPCConfig(
cidr="192.168.0.0/17",
additional_cidr_blocks=["192.168.128.0/17"],
subnets=[public_subnet, private_subnet, private_subnet_different_cidr, no_internet_subnet],
)
template = NetworkTemplateBuilder(vpc_configuration=vpc_config, availability_zone=availability_zones[0]).build()
vpc_stacks[region] = _create_vpc_stack(request, template, region, cfn_stacks_factory)
return vpc_stacks
@pytest.fixture(scope="class")
@pytest.mark.usefixtures("clusters_factory", "images_factory")
def create_roles_stack(request, region):
"""Define a fixture that returns a stack factory for IAM roles."""
logging.info("Creating IAM roles stack")
factory = CfnStacksFactory(request.config.getoption("credential"))
def _create_stack(stack_prefix, roles_file):
stack_template_path = os.path.join("..", "iam_policies", roles_file)
template_data = read_template(stack_template_path)
stack = CfnStack(
name=generate_stack_name(stack_prefix, request.config.getoption("stackname_suffix")),
region=region,
template=template_data,
capabilities=["CAPABILITY_IAM"],
)
factory.create_stack(stack)
return stack
def read_template(template_path):
with open(template_path, encoding="utf-8") as cfn_file:
file_content = cfn_file.read()
return file_content
yield _create_stack
if not request.config.getoption("no_delete"):
factory.delete_all_stacks()
else:
logging.warning("Skipping deletion of IAM roles stack because --no-delete option is set")
def _create_iam_policies(iam_policy_name, region, policy_filename):
logging.info("Creating iam policy {0}...".format(iam_policy_name))
file_loader = FileSystemLoader(pkg_resources.resource_filename(__name__, "/resources"))
env = Environment(loader=file_loader, trim_blocks=True, lstrip_blocks=True)
partition = get_arn_partition(region)
account_id = (
boto3.client("sts", region_name=region, endpoint_url=get_sts_endpoint(region))
.get_caller_identity()
.get("Account")
)
parallel_cluster_instance_policy = env.get_template(policy_filename).render(
partition=partition, region=region, account_id=account_id, cluster_bucket_name="parallelcluster-*"
)
return boto3.client("iam", region_name=region).create_policy(
PolicyName=iam_policy_name, PolicyDocument=parallel_cluster_instance_policy
)["Policy"]["Arn"]
@pytest.fixture(scope="class")
def vpc_stack(vpc_stacks, region):
return vpc_stacks[region]
@pytest.fixture(scope="session")
def public_ecr_image_uri(request):
return request.config.getoption("public_ecr_image_uri")
@pytest.fixture(scope="session")
def api_uri(request):
return request.config.getoption("api_uri")
@pytest.fixture(scope="session")
def api_definition_s3_uri(request):
return request.config.getoption("api_definition_s3_uri")
@pytest.fixture(scope="session")
def api_infrastructure_s3_uri(request):
return request.config.getoption("api_infrastructure_s3_uri")
# If stack creation fails, it is retried once more. This mitigates failures due to resources
# not being available in the randomly picked AZs.
@retry(
stop_max_attempt_number=2,
wait_fixed=5000,
retry_on_exception=lambda exception: not isinstance(exception, KeyboardInterrupt),
)
def _create_vpc_stack(request, template, region, cfn_stacks_factory):
if request.config.getoption("vpc_stack"):
logging.info("Using stack {0} in region {1}".format(request.config.getoption("vpc_stack"), region))
stack = CfnStack(name=request.config.getoption("vpc_stack"), region=region, template=template.to_json())
else:
stack = CfnStack(
name=generate_stack_name("integ-tests-vpc", request.config.getoption("stackname_suffix")),
region=region,
template=template.to_json(),
)
cfn_stacks_factory.create_stack(stack)
return stack
@pytest.fixture(scope="class")
def s3_bucket_factory(region):
"""
Define a fixture to create S3 buckets.
:param region: region where the test is running
:return: a function to create buckets.
"""
created_buckets = []
def _create_bucket():
bucket_name = "integ-tests-" + random_alphanumeric()
logging.info("Creating S3 bucket {0}".format(bucket_name))
create_s3_bucket(bucket_name, region)
created_buckets.append((bucket_name, region))
return bucket_name
yield _create_bucket
for bucket in created_buckets:
logging.info("Deleting S3 bucket {0}".format(bucket[0]))
try:
delete_s3_bucket(bucket_name=bucket[0], region=bucket[1])
except Exception as e:
logging.error("Failed deleting bucket {0} with exception: {1}".format(bucket[0], e))
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Making test result information available in fixtures"""
# add dimension properties to report
_add_properties_to_report(item)
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# set a report attribute for each phase of a call, which can
# be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
if rep.when in ["setup", "call"] and rep.failed:
try:
update_failed_tests_config(item)
except Exception as e:
logging.error("Failed when generating config for failed tests: %s", e, exc_info=True)
def update_failed_tests_config(item):
out_dir = Path(item.config.getoption("output_dir"))
if not str(out_dir).endswith(".out"):
# Navigate to the parent dir in case of parallel run so that we can access the shared parent dir
out_dir = out_dir.parent
out_file = out_dir / "failed_tests_config.yaml"
logging.info("Updating failed tests config file %s", out_file)
# We need to acquire a lock first to prevent concurrent edits to this file
with FileLock(str(out_file) + ".lock"):
failed_tests = {"test-suites": {}}
if out_file.is_file():
with open(str(out_file), encoding="utf-8") as f:
failed_tests = yaml.safe_load(f)
# item.node.nodeid example:
# 'dcv/test_dcv.py::test_dcv_configuration[eu-west-1-c5.xlarge-centos7-slurm-8443-0.0.0.0/0-/shared]'
feature, test_id = item.nodeid.split("/", 1)
test_id = test_id.split("[", 1)[0]
dimensions = {}
for dimension in DIMENSIONS_MARKER_ARGS:
value = item.callspec.params.get(dimension)
if value:
dimensions[dimension + "s"] = [value]
if not dict_has_nested_key(failed_tests, ("test-suites", feature, test_id)):
dict_add_nested_key(failed_tests, [], ("test-suites", feature, test_id, "dimensions"))
if dimensions not in failed_tests["test-suites"][feature][test_id]["dimensions"]:
failed_tests["test-suites"][feature][test_id]["dimensions"].append(dimensions)
with open(out_file, "w", encoding="utf-8") as f:
yaml.dump(failed_tests, f)
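# Illustrative sketch of the failed_tests_config.yaml produced above. The feature,
# test id and dimension values are hypothetical, and the exact dimension keys
# depend on DIMENSIONS_MARKER_ARGS (defined elsewhere in this module):
#
#   test-suites:
#     dcv:
#       test_dcv.py::test_dcv_configuration:
#         dimensions:
#         - regions: [eu-west-1]
#           instances: [c5.xlarge]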
@pytest.fixture()
def architecture(request, instance, region):
"""Return a string describing the architecture supported by the given instance type."""
supported_architecture = request.config.cache.get(f"{instance}/architecture", None)
if supported_architecture is None:
logging.info(f"Getting supported architecture for instance type {instance}")
supported_architecture = get_architecture_supported_by_instance_type(instance, region)
request.config.cache.set(f"{instance}/architecture", supported_architecture)
return supported_architecture
@pytest.fixture()
def network_interfaces_count(request, instance, region):
"""Return the number of network interfaces for the given instance type."""
network_interfaces_count = request.config.cache.get(f"{instance}/network_interfaces_count", None)
if network_interfaces_count is None:
logging.info(f"Getting number of network interfaces for instance type {instance}")
network_interfaces_count = get_network_interfaces_count(instance, region)
request.config.cache.set(f"{instance}/network_interfaces_count", network_interfaces_count)
return network_interfaces_count
@pytest.fixture()
def default_threads_per_core(request, instance, region):
"""Return the default threads per core for the given instance type."""
# NOTE: currently, .metal instances do not contain the DefaultThreadsPerCore
# attribute in their VCpuInfo section. This is a known limitation with the
# ec2 DescribeInstanceTypes API. For these instance types an assumption
# is made that if the instance's supported architectures list includes
# x86_64 then the default is 2, otherwise it's 1.
logging.info(f"Getting defaul threads per core for instance type {instance}")
instance_type_data = get_instance_info(instance, region)
threads_per_core = instance_type_data.get("VCpuInfo", {}).get("DefaultThreadsPerCore")
if threads_per_core is None:
supported_architectures = instance_type_data.get("ProcessorInfo", {}).get("SupportedArchitectures", [])
threads_per_core = 2 if "x86_64" in supported_architectures else 1
logging.info(f"Defaul threads per core for instance type {instance} : {threads_per_core}")
return threads_per_core
@pytest.fixture(scope="session")
def key_name(request):
"""Return the EC2 key pair name to be used."""
return request.config.getoption("key_name")
@pytest.fixture()
def pcluster_ami_without_standard_naming(region, os, architecture):
"""
    Define a fixture to manage the creation and deletion of an AMI without standard naming.
    This AMI is used to test the validation of the pcluster version in the Cookbook
"""
ami_id = None
def _pcluster_ami_without_standard_naming(version):
nonlocal ami_id
ami_id = retrieve_pcluster_ami_without_standard_naming(region, os, version, architecture)
return ami_id
yield _pcluster_ami_without_standard_naming
if ami_id:
client = boto3.client("ec2", region_name=region)
client.deregister_image(ImageId=ami_id)
@pytest.fixture(scope="class")
def ami_copy(region):
"""
    Define a fixture to manage the copy and deletion of an AMI.
    This AMI is used to test head node and compute node AMI updates
"""
copy_ami_id = None
client = boto3.client("ec2", region_name=region)
def _copy_image(image_id, test_name):
nonlocal copy_ami_id
copy_ami_id = client.copy_image(
Name=f"aws-parallelcluster-copied-image-{test_name}", SourceImageId=image_id, SourceRegion=region
).get("ImageId")
        # Create a tag on the copied image so it can be filtered by the AMI cleanup pipeline
client.create_tags(
Resources=[
f"{copy_ami_id}",
],
Tags=[
{
"Key": "parallelcluster:image_id",
"Value": f"aws-parallelcluster-copied-image-{test_name}",
},
{
"Key": "parallelcluster:build_status",
"Value": "available",
},
],
)
return copy_ami_id
yield _copy_image
if copy_ami_id:
client = boto3.client("ec2", region_name=region)
copied_image_info = client.describe_images(ImageIds=[copy_ami_id])
logging.info("Deregister copied AMI.")
client.deregister_image(ImageId=copy_ami_id)
try:
for block_device_mapping in copied_image_info.get("Images")[0].get("BlockDeviceMappings"):
if block_device_mapping.get("Ebs"):
client.delete_snapshot(SnapshotId=block_device_mapping.get("Ebs").get("SnapshotId"))
except IndexError as e:
logging.error("Delete copied AMI snapshot failed due to %s", e)
@pytest.fixture()
def mpi_variants(architecture):
variants = ["openmpi"]
if architecture == "x86_64":
variants.append("intelmpi")
return variants
|
the-stack_0_11870 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urlparse
from resources.lib.modules import client
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['rainierland.com']
self.base_link = 'http://rainierland.com'
self.movie_link = '/movie/%s-%s.html'
def movie(self, imdb, title, year):
try:
url = re.sub('([^\s\-\w])+', '', title.lower()).replace(' ', '-')
url = self.movie_link % (url, year)
url = urlparse.urljoin(self.base_link, url)
url = client.request(url, output='geturl')
if url == None: raise Exception()
url = urlparse.urljoin(self.base_link, url)
url = urlparse.urlparse(url).path
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'div', attrs = {'class': 'screen fluid-width-video-wrapper'})[0]
r = re.findall('src\s*=\s*"(.*?)"', r)[0]
r = urlparse.urljoin(self.base_link, r)
r = client.request(r, referer=url)
links = []
url = re.findall('src\s*=\s*"(.*?)"', r)
url = [i for i in url if 'http' in i]
for i in url:
try: links += [{'source': 'gvideo', 'url': i, 'quality': directstream.googletag(i)[0]['quality'], 'direct': True}]
except: pass
url = re.findall('(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)', r)
url = ['http://' + i for i in url]
for i in url:
try: links += [{'source': 'openload.co', 'url': i, 'quality': 'HD', 'direct': False}]
except: pass
for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Rainierland', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
|
the-stack_0_11872 | from faker import Faker
import os
import random
import pandas as pd
CurrentDir = os.path.dirname(os.path.realpath(__file__))
def CreateFakeInformation(fake,AccountsCSV):
AccountData = pd.read_csv(os.path.join(CurrentDir,AccountsCSV),encoding='latin-1')
AccountDF = pd.DataFrame(AccountData)
DataColumns = ['Phone','Email','Bank Account Number','Federal Tax ID#','Social Security Number']
for i in AccountDF.index:
FakeInfo = [fake.phone_number(),fake.email(),random.randint(100000,9999999),random.randint(100000,9999999),random.randint(100000,9999999)]
for j in range(0,len(DataColumns)):
            AccountDF.at[i, DataColumns[j]] = FakeInfo[j]  # .at avoids pandas chained-assignment pitfalls
FakeAccountsFinalFile = "FakeInfoAccounts.csv"
while(os.path.exists(os.path.join(CurrentDir,FakeAccountsFinalFile))):
FakeAccountsFinalFile = input("%s already exists! New Name: " %FakeAccountsFinalFile) + '.csv'
AccountDF.to_csv(os.path.join(CurrentDir,FakeAccountsFinalFile), index = False)
print("Success")
AccountsCSV = input("Accounts Data file : ")
fake = Faker('en_US')
CreateFakeInformation(fake,AccountsCSV) |
the-stack_0_11878 |
#1. Write a function:
#1) Compute the sum of all arguments multiplied by a base factor (default base=3)
def mysum(*number):
res = 0
for i in number:
res += i
return res
def bei(a, base=3):
    return mysum(a) * base
if __name__=="__main__":
print(bei(mysum(1,3,5)))
|
the-stack_0_11881 | # Amara, universalsubtitles.org
#
# Copyright (C) 2012 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along
# with this program. If not, see http://www.gnu.org/licenses/agpl-3.0.html.
"""babelsubs.loader -- create subtitle sets."""
import os.path
import lxml
from babelsubs import parsers
from babelsubs import storage
from babelsubs.generators.dfxp import DFXPGenerator
from babelsubs.xmlconst import *
class SubtitleLoader(object):
"""SubtitleLoader -- Create SubtitleSets
    SubtitleLoader provides a way of creating SubtitleSet objects with custom
layout/styling sections. It supports both creating new subtitles and
parsing them from other formats.
"""
def __init__(self):
self.styles = []
self.regions = []
def add_style(self, xml_id, **attrib):
"""Add a custom style to the created SubtitleSets.
Each add_style() call will create a new style element in the TTML. At
least one style must be added before creating subtitles.
:param xml_id: xml:id attribute to use
        :param attrib: extra attributes to set. Each attribute name will
have the TTS namespace prefixed to it.
"""
self.styles.append((xml_id, attrib))
def add_region(self, xml_id, style_id, **attrib):
"""Add a custom region to the created SubtitleSets.
Each add_region() call will create a new region element in the TTML. At
least one region must be added before creating subtitles.
The first region added will be the default region for the body.
:param xml_id: xml:id attribute to use
:param style_id: style to use for this region
        :param attrib: extra attributes to set. Each attribute name will
have the TTS namespace prefixed to it.
"""
self.regions.append((xml_id, style_id, attrib))
def _empty_ttml(self, language_code, title, description, frame_rate=None,
frame_rate_multiplier=None, drop_mode=None):
if not self.styles:
raise ValueError("no styles added")
if not self.regions:
raise ValueError("no regions added")
attrib = {}
if language_code:
attrib[XML + 'lang'] = language_code
if frame_rate:
attrib[TTP + 'frameRate'] = frame_rate
if frame_rate_multiplier:
attrib[TTP + 'frameRateMultiplier'] = frame_rate_multiplier
if drop_mode == 'dropNTSC':
attrib[TTP + 'timeBase'] = 'smpte'
attrib[TTP + 'dropMode'] = 'dropNTSC'
tt = lxml.etree.Element(TTML + 'tt', attrib=attrib, nsmap={
None: TTML_NAMESPACE_URI,
'tts': TTS_NAMESPACE_URI,
'ttm': TTM_NAMESPACE_URI,
'ttp': TTP_NAMESPACE_URI,
})
head = lxml.etree.SubElement(tt, TTML + 'head')
head.append(self._create_metadata(title, description))
head.append(self._create_styling())
head.append(self._create_layout())
tt.append(self._create_empty_body())
return tt
def _create_metadata(self, title, description):
metadata = lxml.etree.Element(TTML + 'metadata')
lxml.etree.SubElement(metadata, TTM + 'title').text = title
lxml.etree.SubElement(metadata, TTM + 'description').text = description
lxml.etree.SubElement(metadata, TTM + 'copyright')
return metadata
def _create_styling(self):
styling = lxml.etree.Element(TTML + 'styling')
for (xml_id, attrib) in self.styles:
style = lxml.etree.SubElement(styling, TTML + 'style')
style.set(XML + 'id', xml_id)
for name, value in attrib.items():
style.set(TTS + name, value)
return styling
def _create_layout(self):
layout = lxml.etree.Element(TTML + 'layout')
for (xml_id, style_id, attrib) in self.regions:
region = lxml.etree.SubElement(layout, TTML + 'region')
region.set(XML + 'id', xml_id)
region.set(TTML + 'style', style_id)
for name, value in attrib.items():
region.set(TTS + name, value)
return layout
def _create_empty_body(self):
body = lxml.etree.Element(TTML + 'body', attrib={
TTML + 'region': self.regions[0][0],
})
return body
def create_new(self, language_code, title='', description='',
frame_rate=None, frame_rate_multiplier=None, drop_mode=None):
"""Create a new SubtitleSet. """
ttml = self._empty_ttml(language_code, title, description, frame_rate,
frame_rate_multiplier, drop_mode)
# add an empty div to start the subtitles
lxml.etree.SubElement(ttml.find(TTML + 'body'), TTML + 'div')
return storage.SubtitleSet.create_with_raw_ttml(ttml)
def dfxp_merge(self, subtitle_sets):
"""Create a merged DFXP file from a list of subtitle sets."""
initial_ttml = self._empty_ttml('', '', '')
return DFXPGenerator.merge_subtitles(subtitle_sets, initial_ttml)
def load(self, language_code, path):
"""Create a SubtitleSet with existing subtitles.
If path is a DFXP file, then we will simply load it and return. If
it has any other format, we will create a DFXP template using our
styles/regions and load the subtitles into that. The reason for this
        is that if we are reading DFXP we don't want to overwrite the styles
inside the file with our own.
"""
basename, ext = os.path.splitext(path)
with open(path) as f:
content = f.read()
return self.loads(language_code, content, ext[1:].lower())
def loads(self, language_code, content, file_type):
try:
parser = parsers.discover(file_type)
except KeyError:
raise TypeError("No parser for %s" % file_type)
parsed_subs = parser.parse(content,
language=language_code).to_internal()
if parser is parsers.DFXPParser:
# return the subtitles as-is
return parsed_subs
ttml = self._empty_ttml(language_code, '', '')
self._move_elements(parsed_subs._ttml.find(TTML + 'body'),
ttml.find(TTML + 'body'))
return storage.SubtitleSet.create_with_raw_ttml(ttml)
def _remove_intial_div(self, subtitle_set):
body = subtitle_set._ttml.find(TTML + 'body')
body.remove(body[0])
def _move_elements(self, source, dest):
"""Move children from one etree element to another."""
children = list(source)
source.clear()
for child in children:
dest.append(child)
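# Illustrative usage sketch of the loader API above. The style and region
# attribute values are hypothetical examples, not values required by babelsubs:
#
#   loader = SubtitleLoader()
#   loader.add_style('defaultStyle', color='white', fontSize='18px')
#   loader.add_region('bottom', 'defaultStyle', extent='100% 20%', origin='0% 80%')
#   subs = loader.create_new('en', title='Example subtitles')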
|
the-stack_0_11883 | #!/usr/bin/env python3
"""
USAGE:
yb_mass_column_update.py [options]
PURPOSE:
Update the value of multiple columns.
OPTIONS:
See the command line help message for all options.
(yb_mass_column_update.py --help)
Output:
The update statements for the requested set of columns.
"""
import sys
from yb_common import StoredProc, Util
class mass_column_update(Util):
"""Issue the ybsql command used to list the column names comprising an
object.
"""
config = {
'description': (
'Update the value of multiple columns.'
'\n'
'\nnote:'
'\n Mass column updates may cause performance issues due to the change '
'\n of how the data is ordered in storage.')
, 'optional_args_single': []
, 'optional_args_multi': ['owner', 'schema', 'table', 'column', 'datatype']
, 'usage_example': {
'cmd_line_args': "@$HOME/conn.args --datatype_like 'CHAR%' --update_where_clause \"<column> = 'NULL'\" --set_clause NULL --"
, 'file_args': [Util.conn_args_file] }
, 'db_filter_args': {'owner':'tableowner', 'schema':'schemaname', 'table':'tablename', 'column':'columnname', 'datatype':'datatype'} }
def execute(self):
self.cmd_results = StoredProc('yb_mass_column_update_p', self.db_conn).call_proc_as_anonymous_block(
args = {
'a_update_where_clause' : self.args_handler.args.update_where_clause
, 'a_set_clause' : self.args_handler.args.set_clause
, 'a_column_filter_clause' : self.db_filter_sql()
, 'a_exec_updates' : ('TRUE' if self.args_handler.args.exec_updates else 'FALSE')}
, pre_sql = self.args_handler.args.pre_sql
, post_sql = self.args_handler.args.post_sql)
def additional_args(self):
args_mass_r_grp = self.args_handler.args_parser.add_argument_group('required mass update arguments')
args_mass_r_grp.add_argument(
"--update_where_clause", required=True
, help=("update column only if this boolean clause is satisfied, like: "
"'LENGTH(<column>)<>LENGTH(RTRIM(<column>))', "
"Note: the special use of the string '<column>' ")
)
args_mass_r_grp.add_argument(
"--set_clause", required=True
, help=("Set the column to this value, Like; "
"'RTRIM(<column>)', "
"Note: the special use of the string '<column>' ")
)
args_mass_o_grp = self.args_handler.args_parser.add_argument_group('optional mass update arguments')
args_mass_o_grp.add_argument(
"--exec_updates"
, action='store_true'
, help=("defaults to False and only prints the update statements. When set "
"to True, execute the update statements.")
)
args_mass_o_grp.add_argument("--pre_sql", default=''
, help="SQL to run before the chunking DML, only runs if execute_chunk_dml is set")
args_mass_o_grp.add_argument("--post_sql", default=''
, help="SQL to run after the chunking DML, only runs if execute_chunk_dml is set")
def additional_args_process(self):
if '<column>' not in self.args_handler.args.update_where_clause:
self.args_handler.args_parser.error("UPDATE_WHERE_CLAUSE must contain the string '<column>'")
if not self.args_handler.args.exec_updates:
self.args_handler.args.pre_sql = ''
self.args_handler.args.post_sql = ''
self.args_handler.db_filter_args.schema_set_all_if_none()
def main():
mcu = mass_column_update()
sys.stdout.write('-- Running mass column update.\n')
mcu.execute()
mcu.cmd_results.write(tail='-- Completed mass column update.\n')
exit(mcu.cmd_results.exit_code)
if __name__ == "__main__":
main() |
the-stack_0_11884 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
__dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
os.environ["FLAGS_allocator_strategy"] = 'auto_growth'
import cv2
import copy
import numpy as np
import math
import time
import traceback
import pyxlpr.ppocr.tools.infer.utility as utility
from pyxlpr.ppocr.postprocess import build_post_process
from pyxlpr.ppocr.utils.logging import get_logger
from pyxlpr.ppocr.utils.utility import get_image_file_list, check_and_read_gif
logger = get_logger()
class TextClassifier(object):
def __init__(self, args):
self.cls_image_shape = [int(v) for v in args.cls_image_shape.split(",")]
self.cls_batch_num = args.cls_batch_num
self.cls_thresh = args.cls_thresh
postprocess_params = {
'name': 'ClsPostProcess',
"label_list": args.label_list,
}
self.postprocess_op = build_post_process(postprocess_params)
self.predictor, self.input_tensor, self.output_tensors, _ = \
utility.create_predictor(args, 'cls', logger)
self.use_onnx = args.use_onnx
def resize_norm_img(self, img):
imgC, imgH, imgW = self.cls_image_shape
h = img.shape[0]
w = img.shape[1]
ratio = w / float(h)
if math.ceil(imgH * ratio) > imgW:
resized_w = imgW
else:
resized_w = int(math.ceil(imgH * ratio))
resized_image = cv2.resize(img, (resized_w, imgH))
resized_image = resized_image.astype('float32')
if self.cls_image_shape[0] == 1:
resized_image = resized_image / 255
resized_image = resized_image[np.newaxis, :]
else:
resized_image = resized_image.transpose((2, 0, 1)) / 255
resized_image -= 0.5
resized_image /= 0.5
padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
padding_im[:, :, 0:resized_w] = resized_image
return padding_im
def __call__(self, img_list):
img_list = copy.deepcopy(img_list)
img_num = len(img_list)
# Calculate the aspect ratio of all text bars
width_list = []
for img in img_list:
width_list.append(img.shape[1] / float(img.shape[0]))
# Sorting can speed up the cls process
indices = np.argsort(np.array(width_list))
cls_res = [['', 0.0]] * img_num
batch_num = self.cls_batch_num
elapse = 0
for beg_img_no in range(0, img_num, batch_num):
end_img_no = min(img_num, beg_img_no + batch_num)
norm_img_batch = []
max_wh_ratio = 0
starttime = time.time()
for ino in range(beg_img_no, end_img_no):
h, w = img_list[indices[ino]].shape[0:2]
wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio)
for ino in range(beg_img_no, end_img_no):
norm_img = self.resize_norm_img(img_list[indices[ino]])
norm_img = norm_img[np.newaxis, :]
norm_img_batch.append(norm_img)
norm_img_batch = np.concatenate(norm_img_batch)
norm_img_batch = norm_img_batch.copy()
if self.use_onnx:
input_dict = {}
input_dict[self.input_tensor.name] = norm_img_batch
outputs = self.predictor.run(self.output_tensors, input_dict)
prob_out = outputs[0]
else:
self.input_tensor.copy_from_cpu(norm_img_batch)
self.predictor.run()
prob_out = self.output_tensors[0].copy_to_cpu()
self.predictor.try_shrink_memory()
cls_result = self.postprocess_op(prob_out)
elapse += time.time() - starttime
for rno in range(len(cls_result)):
label, score = cls_result[rno]
cls_res[indices[beg_img_no + rno]] = [label, score]
if '180' in label and score > self.cls_thresh:
img_list[indices[beg_img_no + rno]] = cv2.rotate(
img_list[indices[beg_img_no + rno]], 1)
return img_list, cls_res, elapse
def main(args):
image_file_list = get_image_file_list(args.image_dir)
text_classifier = TextClassifier(args)
valid_image_file_list = []
img_list = []
for image_file in image_file_list:
img, flag = check_and_read_gif(image_file)
if not flag:
img = cv2.imread(image_file)
if img is None:
logger.info("error in loading image:{}".format(image_file))
continue
valid_image_file_list.append(image_file)
img_list.append(img)
try:
img_list, cls_res, predict_time = text_classifier(img_list)
except Exception as E:
logger.info(traceback.format_exc())
logger.info(E)
exit()
for ino in range(len(img_list)):
logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
cls_res[ino]))
if __name__ == "__main__":
main(utility.parse_args())
|
the-stack_0_11885 | """Implemented support for Common Workflow Language (CWL) for Toil."""
# Copyright (C) 2015 Curoverse, Inc
# Copyright (C) 2015-2021 Regents of the University of California
# Copyright (C) 2019-2020 Seven Bridges
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# For an overview of how this all works, see discussion in
# docs/architecture.rst
import argparse
import copy
import datetime
import functools
import json
import logging
import os
import stat
import sys
import tempfile
import textwrap
import urllib
import uuid
from typing import (
Any,
Dict,
Iterator,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Text,
TextIO,
Tuple,
TypeVar,
Union,
cast,
)
from urllib import parse as urlparse
import cwltool.builder
import cwltool.command_line_tool
import cwltool.errors
import cwltool.expression
import cwltool.load_tool
import cwltool.main
import cwltool.provenance
import cwltool.resolver
import cwltool.stdfsaccess
import schema_salad.ref_resolver
from cwltool.loghandler import _logger as cwllogger
from cwltool.loghandler import defaultStreamHandler
from cwltool.mutation import MutationManager
from cwltool.pathmapper import MapperEnt, PathMapper, downloadHttpFile
from cwltool.process import (
Process,
add_sizes,
compute_checksums,
fill_in_defaults,
shortname,
)
from cwltool.secrets import SecretStore
from cwltool.software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from cwltool.utils import (
CWLObjectType,
adjustDirObjs,
adjustFileObjs,
aslist,
convert_pathsep_to_unix,
get_listing,
normalizeFilesDirs,
visit_class,
)
from ruamel.yaml.comments import CommentedMap
from schema_salad import validate
from schema_salad.schema import Names
from schema_salad.sourceline import SourceLine
from toil.common import Config, Toil, addOptions
from toil.fileStores import FileID
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.job import Job
from toil.jobStores.abstractJobStore import NoSuchFileException, NoSuchJobStoreException
from toil.version import baseVersion
logger = logging.getLogger(__name__)
# Define internal jobs we should avoid submitting to batch systems and logging
CWL_INTERNAL_JOBS = (
"CWLJobWrapper",
"CWLWorkflow",
"CWLScatter",
"CWLGather",
"ResolveIndirect",
)
def cwltoil_was_removed():
"""Complain about deprecated entrypoint."""
raise RuntimeError(
'Please run with "toil-cwl-runner" instead of "cwltoil" (which has been removed).'
)
# The job object passed into CWLJob and CWLWorkflow
# is a dict mapping to tuple of (key, dict)
# the final dict is derived by evaluating each
# tuple looking up the key in the supplied dict.
#
# This is necessary because Toil jobs return a single value (a dict)
# but CWL permits steps to have multiple output parameters that may
# feed into multiple other steps. This transformation maps the key in the
# output object to the correct key of the input object.
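#
# Illustrative sketch of that shape (the names are hypothetical): conceptually,
# an unresolved step input looks like
#
#   {"reads": ("reads", upstream_job.rv())}
#
# where upstream_job.rv() is a promise for the upstream job's output dict, and
# resolution looks up "reads" in that dict to obtain the concrete input value.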
class UnresolvedDict(dict):
"""Tag to indicate a dict contains promises that must be resolved."""
class SkipNull:
"""
Internal sentinel object.
Indicates a null value produced by each port of a skipped conditional step.
    The CWL 1.2 specification calls for treating this exactly the same as a
null value.
"""
def filter_skip_null(name: str, value: Any) -> Any:
"""
Recursively filter out SkipNull objects from 'value'.
:param name: Name of port producing this value.
Only used when we find an unhandled null from a conditional step
and we print out a warning. The name allows the user to better
localize which step/port was responsible for the unhandled null.
:param value: port output value object
"""
err_flag = [False]
value = _filter_skip_null(value, err_flag)
if err_flag[0]:
logger.warning(
"In %s, SkipNull result found and cast to None. \n"
"You had a conditional step that did not run, "
"but you did not use pickValue to handle the skipped input." % name
)
return value
def _filter_skip_null(value: Any, err_flag: List[bool]) -> Any:
"""
Private implementation for recursively filtering out SkipNull objects from 'value'.
:param value: port output value object
:param err_flag: A pass by reference boolean (passed by enclosing in a list) that
allows us to flag, at any level of recursion, that we have encountered
a SkipNull.
"""
if isinstance(value, SkipNull):
err_flag[0] = True
value = None
elif isinstance(value, list):
return [_filter_skip_null(v, err_flag) for v in value]
elif isinstance(value, dict):
return {k: _filter_skip_null(v, err_flag) for k, v in value.items()}
return value
class Conditional:
"""
    Object holding a conditional expression until we are ready to evaluate it.
    Evaluation occurs at the moment the enclosing step is ready to run.
"""
def __init__(
self,
expression: Union[str, None] = None,
outputs: Union[dict, None] = None,
requirements: List[CWLObjectType] = [],
):
"""
Instantiate a conditional expression.
:param expression: A string with the expression from the 'when' field of the step
:param outputs: The output dictionary for the step. This is needed because if the
step is skipped, all the outputs need to be populated with SkipNull
values
:param requirements: The requirements object that is needed for the context the
expression will evaluate in.
"""
self.expression = expression
self.outputs = outputs
self.requirements = requirements
def is_false(self, job: Union[dict, None]) -> bool:
"""
Determine if expression evaluates to False given completed step inputs.
:param job: job output object
:return: bool
"""
if self.expression is None:
return False
expr_is_true = cwltool.expression.do_eval(
self.expression,
{shortname(k): v for k, v in resolve_dict_w_promises(job).items()},
self.requirements,
None,
None,
{},
)
if isinstance(expr_is_true, bool):
return not expr_is_true
raise cwltool.errors.WorkflowException(
"'%s' evaluated to a non-boolean value" % self.expression
)
def skipped_outputs(self) -> dict:
"""
Generate a dict of SkipNull objects corresponding to the output structure of the step.
:return: dict
"""
outobj = {}
def sn(n):
if isinstance(n, Mapping):
return shortname(n["id"])
if isinstance(n, str):
return shortname(n)
for k in [sn(o) for o in self.outputs]:
outobj[k] = SkipNull()
return outobj
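# Illustrative sketch (hypothetical step): for a step declared with
#   when: $(inputs.sample_count > 0)
# is_false() evaluates that expression against the resolved step inputs, and if
# the step is skipped, skipped_outputs() fills every declared output with a
# SkipNull placeholder.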
class ResolveSource:
"""Apply linkMerge and pickValue operators to values coming into a port."""
def __init__(self, name: str, input: dict, source_key: str, promises: dict):
"""
Construct a container object.
It will carry what information it can about the input sources and the
current promises, ready for evaluation when the time comes.
:param name: human readable name of step/port that this value refers to
:param input: CWL input object complete with linkMerge and pickValue fields
:param source_key: "source" or "outputSource" depending on what it is
:param promises: incident values packed as promises
"""
self.name, self.input, self.source_key = name, input, source_key
source_names = aslist(self.input[self.source_key])
# Rule is that source: [foo] is just foo unless it also has linkMerge: merge_nested
if input.get("linkMerge") or len(source_names) > 1:
self.promise_tuples = [
(shortname(s), promises[s].rv()) for s in source_names
]
else:
# KG: Cargo culting this logic and the reason given from original Toil code:
# It seems that an input source with a
# '#' in the name will be returned as a
# CommentedSeq list by the yaml parser.
s = str(source_names[0])
self.promise_tuples = (shortname(s), promises[s].rv()) # type: ignore
def resolve(self) -> Any:
"""
First apply linkMerge then pickValue if either present.
:return: dict
"""
if isinstance(self.promise_tuples, list):
result = self.link_merge([v[1][v[0]] for v in self.promise_tuples]) # type: ignore
else:
value = self.promise_tuples
result = value[1].get(value[0]) # type: ignore
result = self.pick_value(result)
result = filter_skip_null(self.name, result)
return result
def link_merge(self, values: dict) -> Union[list, dict]:
"""
Apply linkMerge operator to `values` object.
:param values: dict: result of step
"""
link_merge_type = self.input.get("linkMerge", "merge_nested")
if link_merge_type == "merge_nested":
return values
elif link_merge_type == "merge_flattened":
result = [] # type: ignore
for v in values:
if isinstance(v, MutableSequence):
result.extend(v)
else:
result.append(v)
return result
else:
raise validate.ValidationException(
"Unsupported linkMerge '%s' on %s." % (link_merge_type, self.name)
)
def pick_value(self, values: Union[List, Any]) -> Any:
"""
Apply pickValue operator to `values` object.
:param values: Intended to be a list, but other types will be returned
without modification.
:return:
"""
pick_value_type = self.input.get("pickValue")
if pick_value_type is None:
return values
if not isinstance(values, list):
logger.warning("pickValue used but input %s is not a list." % self.name)
return values
result = [v for v in values if not isinstance(v, SkipNull) and v is not None]
if pick_value_type == "first_non_null":
if len(result) < 1:
raise cwltool.errors.WorkflowException(
"%s: first_non_null operator found no non-null values" % self.name
)
else:
return result[0]
elif pick_value_type == "the_only_non_null":
if len(result) == 0:
raise cwltool.errors.WorkflowException(
"%s: the_only_non_null operator found no non-null values"
% self.name
)
elif len(result) > 1:
raise cwltool.errors.WorkflowException(
"%s: the_only_non_null operator found more than one non-null values"
% self.name
)
else:
return result[0]
elif pick_value_type == "all_non_null":
return result
else:
raise cwltool.errors.WorkflowException(
"Unsupported pickValue '%s' on %s" % (pick_value_type, self.name)
)
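# Illustrative sketch of the operators implemented by ResolveSource above, with
# hypothetical values: for values = [SkipNull(), None, "a"], pickValue
# "first_non_null" and "the_only_non_null" both yield "a" while "all_non_null"
# yields ["a"]; for values = [["a"], ["b", "c"]], linkMerge "merge_nested" keeps
# the nesting while "merge_flattened" yields ["a", "b", "c"].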
class StepValueFrom:
"""
A workflow step input which has a valueFrom expression attached to it.
The valueFrom expression will be evaluated to produce the actual input
object for the step.
"""
def __init__(self, expr: str, source: Any, req: List[CWLObjectType]):
"""
Instantiate an object to carry all know about this valueFrom expression.
:param expr: str: expression as a string
:param source: the source promise of this step
:param req: requirements object that is consumed by CWLtool expression evaluator
"""
self.expr = expr
self.source = source
self.context = None
self.req = req
def eval_prep(self, step_inputs: dict, file_store: AbstractFileStore):
"""
Resolve the contents of any file in a set of inputs.
The inputs must be associated with the StepValueFrom object's self.source.
Called when loadContents is specified.
:param step_inputs: Workflow step inputs.
:param file_store: A toil file store, needed to resolve toil fs:// paths.
"""
for v in step_inputs.values():
val = cast(CWLObjectType, v)
source_input = getattr(self.source, "input", {})
if isinstance(val, dict) and isinstance(source_input, dict):
if (
val.get("contents") is None
and source_input.get("loadContents") is True
):
fs_access = functools.partial(ToilFsAccess, file_store=file_store)
with fs_access("").open(cast(str, val["location"]), "rb") as f:
val["contents"] = cwltool.builder.content_limit_respected_read(
f
)
def resolve(self) -> Any:
"""
Resolve the promise in the valueFrom expression's context.
:return: object that will serve as expression context
"""
self.context = self.source.resolve()
return self.context
def do_eval(self, inputs: CWLObjectType) -> Any:
"""
Evaluate the valueFrom expression with the given input object.
:param inputs:
:return: object
"""
return cwltool.expression.do_eval(
self.expr, inputs, self.req, None, None, {}, context=self.context
)
class DefaultWithSource:
"""A workflow step input that has both a source and a default value."""
def __init__(self, default: Any, source: Any):
"""
Instantiate an object to handle a source that has a default value.
:param default: the default value
:param source: the source object
"""
self.default = default
self.source = source
def resolve(self) -> Any:
"""
Determine the final input value when the time is right.
(when the source can be resolved)
:return: dict
"""
if self.source:
result = self.source.resolve()
if result is not None:
return result
return self.default
class JustAValue:
"""A simple value masquerading as a 'resolve'-able object."""
def __init__(self, val: Any):
"""Store the value."""
self.val = val
def resolve(self) -> Any:
"""Return the value."""
return self.val
def resolve_dict_w_promises(
dict_w_promises: dict, file_store: AbstractFileStore = None
) -> dict:
"""
Resolve a dictionary of promises evaluate expressions to produce the actual values.
:param dict_w_promises: input dict for these values
:return: dictionary of actual values
"""
if isinstance(dict_w_promises, UnresolvedDict):
first_pass_results = {k: v.resolve() for k, v in dict_w_promises.items()}
else:
first_pass_results = {k: v for k, v in dict_w_promises.items()}
result = {}
for k, v in dict_w_promises.items():
if isinstance(v, StepValueFrom):
if file_store:
v.eval_prep(first_pass_results, file_store)
result[k] = v.do_eval(inputs=first_pass_results)
else:
result[k] = first_pass_results[k]
# '_:' prefixed file paths are a signal to cwltool to create folders in place
# rather than copying them, so we make them here
for entry in result:
if isinstance(result[entry], dict):
location = result[entry].get("location")
if location:
if location.startswith("_:file://"):
local_dir_path = location[len("_:file://") :]
os.makedirs(local_dir_path, exist_ok=True)
result[entry]["location"] = local_dir_path
return result
def simplify_list(maybe_list: Any) -> Any:
"""
Turn a length one list loaded by cwltool into a scalar.
Anything else is passed as-is, by reference.
"""
if isinstance(maybe_list, MutableSequence):
is_list = aslist(maybe_list)
if len(is_list) == 1:
return is_list[0]
return maybe_list
class ToilPathMapper(PathMapper):
"""
Keeps track of files in a Toil way.
Maps the symbolic identifier of a file (the Toil FileID), its local path on
    the host (the value returned by readGlobalFile), and the location of the
file inside the software container.
"""
def __init__(
self,
referenced_files: list,
basedir: str,
stagedir: str,
separateDirs: bool = True,
get_file: Union[Any, None] = None,
stage_listing: bool = False,
):
"""Initialize this ToilPathMapper."""
self.get_file = get_file
self.stage_listing = stage_listing
super(ToilPathMapper, self).__init__(
referenced_files, basedir, stagedir, separateDirs=separateDirs
)
def visit(
self,
obj: CWLObjectType,
stagedir: str,
basedir: str,
copy: bool = False,
staged: bool = False,
) -> None:
"""Iterate over a CWL object, resolving File and Directory path references."""
stagedir = cast(Optional[str], obj.get("dirname")) or stagedir
tgt = convert_pathsep_to_unix(
os.path.join(
stagedir,
cast(str, obj["basename"]),
)
)
if obj["location"] in self._pathmap:
return
if obj["class"] == "Directory":
location = cast(str, obj["location"])
if location.startswith("file://"):
resolved = schema_salad.ref_resolver.uri_file_path(location)
else:
resolved = location
self._pathmap[location] = MapperEnt(
resolved, tgt, "WritableDirectory" if copy else "Directory", staged
)
if location.startswith("file://"):
staged = False
self.visitlisting(
cast(List, obj.get("listing", [])),
tgt,
basedir,
copy=copy,
staged=staged,
)
elif obj["class"] == "File":
path = cast(str, obj["location"])
ab = cwltool.stdfsaccess.abspath(path, basedir)
if "contents" in obj and path.startswith("_:"):
self._pathmap[path] = MapperEnt(
cast(str, obj["contents"]),
tgt,
"CreateWritableFile" if copy else "CreateFile",
staged,
)
else:
with SourceLine(
obj,
"location",
validate.ValidationException,
logger.isEnabledFor(logging.DEBUG),
):
deref = self.get_file(path) if self.get_file else ab
if deref.startswith("file:"):
deref = schema_salad.ref_resolver.uri_file_path(deref)
if urllib.parse.urlsplit(deref).scheme in ["http", "https"]:
deref = downloadHttpFile(path)
elif urllib.parse.urlsplit(deref).scheme != "toilfs":
# Dereference symbolic links
st = os.lstat(deref)
while stat.S_ISLNK(st.st_mode):
rl = os.readlink(deref)
deref = (
rl
if os.path.isabs(rl)
else os.path.join(os.path.dirname(deref), rl)
)
st = os.lstat(deref)
self._pathmap[path] = MapperEnt(
deref, tgt, "WritableFile" if copy else "File", staged
)
self.visitlisting(
cast(List[CWLObjectType], obj.get("secondaryFiles", [])),
stagedir,
basedir,
copy=copy,
staged=staged,
)
class ToilCommandLineTool(cwltool.command_line_tool.CommandLineTool):
"""Subclass the cwltool command line tool to provide the custom Toil.PathMapper."""
def make_path_mapper(
self,
reffiles: List[Any],
stagedir: str,
runtimeContext: cwltool.context.RuntimeContext,
separateDirs: bool,
) -> cwltool.pathmapper.PathMapper:
"""Create the appropriate ToilPathMapper for the situation."""
return ToilPathMapper(
reffiles,
runtimeContext.basedir,
stagedir,
separateDirs,
runtimeContext.toil_get_file, # type: ignore
)
def toil_make_tool(
toolpath_object: CommentedMap,
loadingContext: cwltool.context.LoadingContext,
) -> Process:
"""
Emit custom ToilCommandLineTools.
    This factory function is meant to be passed to cwltool.load_tool().
"""
if (
isinstance(toolpath_object, Mapping)
and toolpath_object.get("class") == "CommandLineTool"
):
return ToilCommandLineTool(toolpath_object, loadingContext)
return cwltool.workflow.default_make_tool(toolpath_object, loadingContext)
class ToilFsAccess(cwltool.stdfsaccess.StdFsAccess):
"""Custom filesystem access class which handles toil filestore references."""
def __init__(self, basedir: str, file_store: AbstractFileStore = None):
"""Create a FsAccess object for the given Toil Filestore and basedir."""
self.file_store = file_store
super(ToilFsAccess, self).__init__(basedir)
def exists(self, path: str) -> bool:
"""Test for file existance."""
# toil's _abs() throws errors when files are not found and cwltool's _abs() does not
try:
return os.path.exists(self._abs(path))
except NoSuchFileException:
return False
def realpath(self, path: str) -> str:
if path.startswith("toilfs:"):
# import the file and make it available locally if it exists
path = self._abs(path)
elif path.startswith("_:"):
return path
return os.path.realpath(path)
def listdir(self, fn: str) -> List[str]:
directory = self._abs(fn)
if fn.startswith("_:file://"):
directory = fn[len("_:file://") :]
if os.path.isdir(directory):
return [
cwltool.stdfsaccess.abspath(urllib.parse.quote(entry), fn)
for entry in os.listdir(directory)
]
else:
return []
else:
return [
cwltool.stdfsaccess.abspath(urllib.parse.quote(entry), fn)
for entry in os.listdir(self._abs(directory))
]
def _abs(self, path: str) -> str:
"""
        Return a local absolute path for a file (no schema).
        Overrides cwltool.stdfsaccess.StdFsAccess._abs() to account for Toil-specific schemes.
"""
# Used to fetch a path to determine if a file exists in the inherited
# cwltool.stdfsaccess.StdFsAccess, (among other things) so this should
# not error on missing files.
# See: https://github.com/common-workflow-language/cwltool/blob/beab66d649dd3ee82a013322a5e830875e8556ba/cwltool/stdfsaccess.py#L43 # noqa B950
if path.startswith("toilfs:"):
logger.debug("Need to download file to get a local absolute path.")
destination = self.file_store.readGlobalFile(FileID.unpack(path[7:]))
logger.debug("Downloaded %s to %s", path, destination)
if not os.path.exists(destination):
raise RuntimeError(
f"{destination} does not exist after filestore import."
)
elif path.startswith("_:file://"):
destination = path
else:
destination = super(ToilFsAccess, self)._abs(path)
return destination
def toil_get_file(
file_store: AbstractFileStore, index: dict, existing: dict, file_store_id: str
) -> str:
"""Get path to input file from Toil jobstore."""
if not file_store_id.startswith("toilfs:"):
return file_store.jobStore.getPublicUrl(
file_store.jobStore.importFile(file_store_id)
)
src_path = file_store.readGlobalFile(FileID.unpack(file_store_id[7:]))
index[src_path] = file_store_id
existing[file_store_id] = src_path
return schema_salad.ref_resolver.file_uri(src_path)
def write_file(writeFunc: Any, index: dict, existing: dict, file_uri: str) -> str:
"""
Write a file into the Toil jobstore.
'existing' is a set of files retrieved as inputs from toil_get_file. This
ensures they are mapped back as the same name if passed through.
Returns a toil uri path to the object.
"""
# Toil fileStore reference
if file_uri.startswith("toilfs:"):
return file_uri
# File literal outputs with no path, we don't write these and will fail
# with unsupportedRequirement when retrieving later with getFile
elif file_uri.startswith("_:"):
return file_uri
else:
file_uri = existing.get(file_uri, file_uri)
if file_uri not in index:
if not urlparse.urlparse(file_uri).scheme:
rp = os.path.realpath(file_uri)
else:
rp = file_uri
try:
index[file_uri] = "toilfs:" + writeFunc(rp).pack()
existing[index[file_uri]] = file_uri
except Exception as e:
logger.error("Got exception '%s' while copying '%s'", e, file_uri)
raise
return index[file_uri]
def prepareDirectoryForUpload(
directory_metadata: dict, skip_broken: bool = False
) -> None:
"""
Prepare a Directory object to be uploaded.
Assumes listings are already filled in.
Makes sure the directory actually exists, and rewrites its location to be
something we can use on another machine.
Since Files and sub-Directories are already tracked by the directory's
listing, we just need some sentinel path to represent the existence of a
directory coming from Toil and not the local filesystem.
"""
if directory_metadata["location"].startswith("toilfs:") or directory_metadata[
"location"
].startswith("_:"):
# Already in Toil; nothing to do
return
if not directory_metadata["location"] and directory_metadata["path"]:
directory_metadata["location"] = schema_salad.ref_resolver.file_uri(
directory_metadata["path"]
)
if directory_metadata["location"].startswith("file://") and not os.path.isdir(
directory_metadata["location"][7:]
):
if skip_broken:
return
else:
raise cwltool.errors.WorkflowException(
"Directory is missing: %s" % directory_metadata["location"]
)
# The metadata for a directory is all we need to keep around for it. It
# doesn't have a real location. But each directory needs a unique location
# or cwltool won't ship the metadata along. cwltool takes "_:" as a signal
# to make directories instead of copying from somewhere. So we give every
# directory a unique _: location and cwltool's machinery Just Works.
directory_metadata["location"] = "_:" + directory_metadata["location"]
logger.debug("Sending directory at %s", directory_metadata["location"])
def uploadFile(
uploadfunc: Any,
fileindex: dict,
existing: dict,
file_metadata: dict,
skip_broken: bool = False,
) -> None:
"""
Update a file object so that the location is a reference to the toil file store.
Write the file object to the file store if necessary.
"""
if file_metadata["location"].startswith("toilfs:") or file_metadata[
"location"
].startswith("_:"):
return
if file_metadata["location"] in fileindex:
file_metadata["location"] = fileindex[file_metadata["location"]]
return
if not file_metadata["location"] and file_metadata["path"]:
file_metadata["location"] = schema_salad.ref_resolver.file_uri(
file_metadata["path"]
)
if file_metadata["location"].startswith("file://") and not os.path.isfile(
file_metadata["location"][7:]
):
if skip_broken:
return
else:
raise cwltool.errors.WorkflowException(
"File is missing: %s" % file_metadata["location"]
)
file_metadata["location"] = write_file(
uploadfunc, fileindex, existing, file_metadata["location"]
)
logger.debug("Sending file at: %s", file_metadata["location"])
def writeGlobalFileWrapper(file_store: AbstractFileStore, fileuri: str) -> str:
"""Wrap writeGlobalFile to accept file:// URIs."""
fileuri = fileuri if ":/" in fileuri else f"file://{fileuri}"
return file_store.writeGlobalFile(schema_salad.ref_resolver.uri_file_path(fileuri))
def remove_empty_listings(rec: CWLObjectType) -> None:
if rec.get("class") != "Directory":
finddirs = [] # type: List[CWLObjectType]
visit_class(rec, ("Directory",), finddirs.append)
for f in finddirs:
remove_empty_listings(f)
return
if "listing" in rec and rec["listing"] == []:
del rec["listing"]
return
class ResolveIndirect(Job):
"""
Helper Job.
Accepts an unresolved dict (containing promises) and produces a dictionary
of actual values.
"""
def __init__(self, cwljob: dict):
"""Store the dictionary of promises for later resolution."""
        super(ResolveIndirect, self).__init__(cores=1, memory=1024 * 1024, disk=0)
self.cwljob = cwljob
def run(self, file_store: AbstractFileStore) -> dict:
"""Evaluate the promises and return their values."""
return resolve_dict_w_promises(self.cwljob)
def toilStageFiles(
file_store: AbstractFileStore,
cwljob: Union[Dict[Text, Any], List[Dict[Text, Any]]],
outdir: str,
destBucket: Union[str, None] = None,
) -> None:
"""Copy input files out of the global file store and update location and path."""
def _collectDirEntries(
obj: Union[Dict[Text, Any], List[Dict[Text, Any]]]
) -> Iterator[Dict[Text, Any]]:
if isinstance(obj, dict):
if obj.get("class") in ("File", "Directory"):
yield obj
for dir_entry in _collectDirEntries(obj.get("secondaryFiles", [])):
yield dir_entry
else:
for sub_obj in obj.values():
for dir_entry in _collectDirEntries(sub_obj):
yield dir_entry
elif isinstance(obj, list):
for sub_obj in obj:
for dir_entry in _collectDirEntries(sub_obj):
yield dir_entry
jobfiles = list(_collectDirEntries(cwljob))
pm = ToilPathMapper(jobfiles, "", outdir, separateDirs=False, stage_listing=True)
for _, p in pm.items():
if p.staged:
if destBucket and p.type in ["File", "CreateFile"]:
# Directories don't need to be created if we're exporting to a bucket
baseName = p.target[len(outdir) :]
local_file_path = p.resolved[len("file://") :]
if (
p.type == "CreateFile"
): # TODO: CreateFile for buckets is not under testing
local_file_path = os.path.join(
file_store.getLocalTempDir(), baseName
)
with open(local_file_path, "wb") as n:
n.write(p.resolved.encode("utf-8"))
destUrl = "/".join(s.strip("/") for s in [destBucket, baseName])
file_store.exportFile(FileID.unpack(local_file_path), destUrl)
else:
if not os.path.exists(p.target) and p.type == "Directory":
os.makedirs(p.target)
if not os.path.exists(p.target) and p.type == "File":
os.makedirs(os.path.dirname(p.target), exist_ok=True)
file_store.exportFile(
FileID.unpack(p.resolved[7:]), "file://" + p.target
)
if not os.path.exists(p.target) and p.type == "CreateFile":
os.makedirs(os.path.dirname(p.target), exist_ok=True)
with open(p.target, "wb") as n:
n.write(p.resolved.encode("utf-8"))
def _check_adjust(f: dict) -> dict:
f["location"] = schema_salad.ref_resolver.file_uri(pm.mapper(f["location"])[1])
if "contents" in f:
del f["contents"]
return f
visit_class(cwljob, ("File", "Directory"), _check_adjust)
class CWLJobWrapper(Job):
"""
Wrap a CWL job that uses dynamic resources requirement.
When executed, this creates a new child job which has the correct resource
requirement set.
"""
def __init__(
self,
tool: ToilCommandLineTool,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None] = None,
):
"""Store our context for later evaluation."""
super(CWLJobWrapper, self).__init__(cores=1, memory=1024 * 1024, disk=8 * 1024)
self.cwltool = remove_pickle_problems(tool)
self.cwljob = cwljob
self.runtime_context = runtime_context
self.conditional = conditional
def run(self, file_store: AbstractFileStore) -> Any:
"""Create a child job with the correct resource requirements set."""
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
fill_in_defaults(
self.cwltool.tool["inputs"],
cwljob,
self.runtime_context.make_fs_access(self.runtime_context.basedir or ""),
)
realjob = CWLJob(
tool=self.cwltool,
cwljob=cwljob,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(realjob)
return realjob.rv()
class CWLJob(Job):
"""Execute a CWL tool using cwltool.executors.SingleJobExecutor."""
def __init__(
self,
tool: ToilCommandLineTool,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None] = None,
):
"""Store the context for later execution."""
self.cwltool = remove_pickle_problems(tool)
self.conditional = conditional or Conditional()
if runtime_context.builder:
self.builder = runtime_context.builder
else:
self.builder = cwltool.builder.Builder(
job=cwljob,
files=[],
bindings=[],
schemaDefs={},
names=Names(),
requirements=self.cwltool.requirements,
hints=[],
resources={},
mutation_manager=None,
formatgraph=None,
make_fs_access=runtime_context.make_fs_access, # type: ignore
fs_access=runtime_context.make_fs_access(""),
job_script_provider=None,
timeout=runtime_context.eval_timeout,
debug=False,
js_console=False,
force_docker_pull=False,
loadListing=determine_load_listing(tool),
outdir="",
tmpdir="/tmp", # TODO: use actual defaults here
stagedir="/var/lib/cwl", # TODO: use actual defaults here
cwlVersion=cast(str, self.cwltool.metadata["cwlVersion"]),
)
req = tool.evalResources(self.builder, runtime_context)
# pass the default of None if basecommand is empty
unitName = self.cwltool.tool.get("baseCommand", None)
if isinstance(unitName, (MutableSequence, tuple)):
unitName = " ".join(unitName)
try:
displayName = str(self.cwltool.tool["id"])
except KeyError:
displayName = None
super(CWLJob, self).__init__(
cores=req["cores"],
memory=int(req["ram"] * (2 ** 20)),
disk=int(
(cast(int, req["tmpdirSize"]) * (2 ** 20))
+ (cast(int, req["outdirSize"]) * (2 ** 20))
),
unitName=unitName,
displayName=displayName,
)
self.cwljob = cwljob
try:
self.jobName = str(self.cwltool.tool["id"])
except KeyError:
# fall back to the Toil defined class name if the tool doesn't have
# an identifier
pass
self.runtime_context = runtime_context
self.step_inputs = self.cwltool.tool["inputs"]
self.workdir = runtime_context.workdir # type: ignore
def required_env_vars(self, cwljob: Any) -> Iterator[Tuple[str, str]]:
"""Yield environment variables from EnvVarRequirement."""
if isinstance(cwljob, dict):
if cwljob.get("class") == "EnvVarRequirement":
for t in cwljob.get("envDef", {}):
yield t["envName"], cast(str, self.builder.do_eval(t["envValue"]))
for v in cwljob.values():
for env_name, env_value in self.required_env_vars(v):
yield env_name, env_value
if isinstance(cwljob, list):
for env_var in cwljob:
for env_name, env_value in self.required_env_vars(env_var):
yield env_name, env_value
def populate_env_vars(self, cwljob: dict) -> dict:
"""
Prepare environment variables necessary at runtime for the job.
Env vars specified in the CWL "requirements" section should already be
loaded in self.cwltool.requirements, however those specified with
"EnvVarRequirement" take precedence and are only populated here. Therefore,
this not only returns a dictionary with all evaluated "EnvVarRequirement"
env vars, but checks self.cwltool.requirements for any env vars with the
same name and replaces their value with that found in the
"EnvVarRequirement" env var if it exists.
"""
self.builder.job = cwljob
required_env_vars = {}
# iterate over EnvVarRequirement env vars, if any
for k, v in self.required_env_vars(cwljob):
required_env_vars[
k
] = v # will tell cwltool which env vars to take from the environment
os.environ[k] = v
# needs to actually be populated in the environment as well or
# they're not used
# EnvVarRequirement env vars take priority over those specified with
# "requirements" so cwltool.requirements need to be overwritten if an
# env var with the same name is found
for req in self.cwltool.requirements:
for env_def in cast(Dict, req.get("envDef", {})):
env_name = env_def.get("envName", "")
if env_name in required_env_vars:
env_def["envValue"] = required_env_vars[env_name]
return required_env_vars
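    # Illustrative sketch, not part of the original module: given a job order
    # fragment carrying a single EnvVarRequirement, e.g.
    #
    #     cwljob = {"class": "EnvVarRequirement",
    #               "envDef": [{"envName": "MY_TOOL_HOME", "envValue": "/opt/tool"}]}
    #     required = self.populate_env_vars(cwljob)
    #
    # `required` would be {"MY_TOOL_HOME": "/opt/tool"}, os.environ gains the
    # same entry, and any envDef named MY_TOOL_HOME in self.cwltool.requirements
    # is overwritten with that value (MY_TOOL_HOME and /opt/tool are made-up
    # names used only for illustration).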
def run(self, file_store: AbstractFileStore) -> Any:
"""Execute the CWL document."""
# Adjust cwltool's logging to conform to Toil's settings.
# We need to make sure this happens in every worker process before we
# do CWL things.
cwllogger.removeHandler(defaultStreamHandler)
cwllogger.setLevel(logger.getEffectiveLevel())
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
if self.conditional.is_false(cwljob):
return self.conditional.skipped_outputs()
fill_in_defaults(
self.step_inputs, cwljob, self.runtime_context.make_fs_access("")
)
required_env_vars = self.populate_env_vars(cwljob)
immobile_cwljob_dict = copy.deepcopy(cwljob)
for inp_id in immobile_cwljob_dict.keys():
found = False
for field in cast(
List[Dict[str, str]], self.cwltool.inputs_record_schema["fields"]
):
if field["name"] == inp_id:
found = True
if not found:
cwljob.pop(inp_id)
adjustDirObjs(
cwljob,
functools.partial(remove_empty_listings),
)
# Exports temporary directory for batch systems that reset TMPDIR
os.environ["TMPDIR"] = os.path.realpath(file_store.getLocalTempDir())
outdir = os.path.join(file_store.getLocalTempDir(), "out")
os.mkdir(outdir)
# Just keep the temporary output prefix under the job's local temp dir,
# next to the outdir.
#
# If we maintain our own system of nested temp directories, we won't
# know when all the jobs using a higher-level directory are ready for
# it to be deleted. The local temp dir, under Toil's workDir, will be
# cleaned up by Toil.
tmp_outdir_prefix = os.path.join(file_store.getLocalTempDir(), "tmp-out")
index = {} # type: ignore
existing = {} # type: ignore
# Prepare the run instructions for cwltool
runtime_context = self.runtime_context.copy()
runtime_context.basedir = os.getcwd()
runtime_context.outdir = outdir
runtime_context.tmp_outdir_prefix = tmp_outdir_prefix
runtime_context.tmpdir_prefix = file_store.getLocalTempDir()
runtime_context.make_fs_access = functools.partial(
ToilFsAccess, file_store=file_store
)
runtime_context.preserve_environment = required_env_vars
runtime_context.toil_get_file = functools.partial( # type: ignore
toil_get_file, file_store, index, existing
)
# TODO: Pass in a real builder here so that cwltool's builder is built with Toil's fs_access?
# see: https://github.com/common-workflow-language/cwltool/blob/78fe9d41ee5a44f8725dfbd7028e4a5ee42949cf/cwltool/builder.py#L474
# self.builder.outdir = outdir
# runtime_context.builder = self.builder
process_uuid = uuid.uuid4() # noqa F841
started_at = datetime.datetime.now() # noqa F841
logger.debug("Running CWL job: %s", cwljob)
output, status = cwltool.executors.SingleJobExecutor().execute(
process=self.cwltool,
job_order_object=cwljob,
runtime_context=runtime_context,
logger=cwllogger,
)
ended_at = datetime.datetime.now() # noqa F841
if status != "success":
raise cwltool.errors.WorkflowException(status)
adjustDirObjs(
output,
functools.partial(
get_listing, cwltool.stdfsaccess.StdFsAccess(outdir), recursive=True
),
)
adjustDirObjs(output, prepareDirectoryForUpload)
# write the outputs into the jobstore
adjustFileObjs(
output,
functools.partial(
uploadFile,
functools.partial(writeGlobalFileWrapper, file_store),
index,
existing,
),
)
# metadata[process_uuid] = {
# 'started_at': started_at,
# 'ended_at': ended_at,
# 'job_order': cwljob,
# 'outputs': output,
# 'internal_name': self.jobName
# }
return output
def makeJob(
tool: Process,
jobobj: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None],
) -> tuple:
"""
Create the correct Toil Job object for the CWL tool.
Types: workflow, job, or job wrapper for dynamic resource requirements.
:return: "wfjob, followOn" if the input tool is a workflow, and "job, job" otherwise
"""
if tool.tool["class"] == "Workflow":
wfjob = CWLWorkflow(
cast(cwltool.workflow.Workflow, tool),
jobobj,
runtime_context,
conditional=conditional,
)
followOn = ResolveIndirect(wfjob.rv())
wfjob.addFollowOn(followOn)
return wfjob, followOn
else:
resourceReq, _ = tool.get_requirement("ResourceRequirement")
if resourceReq:
for req in (
"coresMin",
"coresMax",
"ramMin",
"ramMax",
"tmpdirMin",
"tmpdirMax",
"outdirMin",
"outdirMax",
):
r = resourceReq.get(req)
if isinstance(r, str) and ("$(" in r or "${" in r):
# Found a dynamic resource requirement so use a job wrapper
job = CWLJobWrapper(
cast(ToilCommandLineTool, tool),
jobobj,
runtime_context,
conditional=conditional,
)
return job, job
job = CWLJob(tool, jobobj, runtime_context, conditional=conditional) # type: ignore
return job, job
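# Illustrative sketch, not part of the original module: a tool whose
# ResourceRequirement embeds a CWL expression, e.g.
#
#     requirements:
#       ResourceRequirement:
#         coresMin: $(inputs.threads)
#
# comes back from makeJob() as a CWLJobWrapper (the "$(" / "${" substrings
# trigger the dynamic-resource path), a plain numeric requirement yields a
# CWLJob directly, and a Workflow yields a (CWLWorkflow, ResolveIndirect) pair.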
class CWLScatter(Job):
"""
Implement workflow scatter step.
When run, this creates a child job for each parameterization of the scatter.
"""
def __init__(
self,
step: cwltool.workflow.WorkflowStep,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None],
):
"""Store our context for later execution."""
        super(CWLScatter, self).__init__(cores=1, memory=100 * 1024 * 1024, disk=0)
self.step = step
self.cwljob = cwljob
self.runtime_context = runtime_context
self.conditional = conditional
def flat_crossproduct_scatter(
self, joborder: dict, scatter_keys: list, outputs: list, postScatterEval: Any
) -> None:
"""Cartesian product of the inputs, then flattened."""
scatter_key = shortname(scatter_keys[0])
for n in range(0, len(joborder[scatter_key])):
updated_joborder = copy.copy(joborder)
updated_joborder[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
updated_joborder = postScatterEval(updated_joborder)
subjob, followOn = makeJob(
tool=self.step.embedded_tool,
jobobj=updated_joborder,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(subjob)
outputs.append(followOn.rv())
else:
self.flat_crossproduct_scatter(
updated_joborder, scatter_keys[1:], outputs, postScatterEval
)
def nested_crossproduct_scatter(
self, joborder: dict, scatter_keys: list, postScatterEval: Any
) -> list:
"""Cartesian product of the inputs."""
scatter_key = shortname(scatter_keys[0])
outputs = []
for n in range(0, len(joborder[scatter_key])):
updated_joborder = copy.copy(joborder)
updated_joborder[scatter_key] = joborder[scatter_key][n]
if len(scatter_keys) == 1:
updated_joborder = postScatterEval(updated_joborder)
subjob, followOn = makeJob(
tool=self.step.embedded_tool,
jobobj=updated_joborder,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(subjob)
outputs.append(followOn.rv())
else:
outputs.append(
self.nested_crossproduct_scatter(
updated_joborder, scatter_keys[1:], postScatterEval
)
)
return outputs
def run(self, file_store: AbstractFileStore) -> list:
"""Generate the follow on scatter jobs."""
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
if isinstance(self.step.tool["scatter"], str):
scatter = [self.step.tool["scatter"]]
else:
scatter = self.step.tool["scatter"]
scatterMethod = self.step.tool.get("scatterMethod", None)
if len(scatter) == 1:
scatterMethod = "dotproduct"
outputs = []
valueFrom = {
shortname(i["id"]): i["valueFrom"]
for i in self.step.tool["inputs"]
if "valueFrom" in i
}
def postScatterEval(job_dict: dict) -> Any:
shortio = {shortname(k): v for k, v in job_dict.items()}
for k in valueFrom:
job_dict.setdefault(k, None)
def valueFromFunc(k: str, v: Any) -> Any:
if k in valueFrom:
return cwltool.expression.do_eval(
valueFrom[k],
shortio,
self.step.requirements,
None,
None,
{},
context=v,
)
else:
return v
return {k: valueFromFunc(k, v) for k, v in list(job_dict.items())}
if scatterMethod == "dotproduct":
for i in range(0, len(cwljob[shortname(scatter[0])])):
copyjob = copy.copy(cwljob)
for sc in [shortname(x) for x in scatter]:
copyjob[sc] = cwljob[sc][i]
copyjob = postScatterEval(copyjob)
subjob, follow_on = makeJob(
tool=self.step.embedded_tool,
jobobj=copyjob,
runtime_context=self.runtime_context,
conditional=self.conditional,
)
self.addChild(subjob)
outputs.append(follow_on.rv())
elif scatterMethod == "nested_crossproduct":
outputs = self.nested_crossproduct_scatter(cwljob, scatter, postScatterEval)
elif scatterMethod == "flat_crossproduct":
self.flat_crossproduct_scatter(cwljob, scatter, outputs, postScatterEval)
else:
if scatterMethod:
raise validate.ValidationException(
"Unsupported complex scatter type '%s'" % scatterMethod
)
else:
                raise validate.ValidationException(
                    "Must provide scatterMethod to scatter over multiple inputs."
                )
return outputs
class CWLGather(Job):
"""
Follows on to a scatter Job.
This gathers the outputs of each job in the scatter into an array for each
output parameter.
"""
def __init__(
self,
step: cwltool.workflow.WorkflowStep,
outputs: Union[Mapping, MutableSequence],
):
"""Collect our context for later gathering."""
        super(CWLGather, self).__init__(cores=1, memory=10 * 1024 * 1024, disk=0)
self.step = step
self.outputs = outputs
@staticmethod
def extract(obj: Union[Mapping, MutableSequence], k: str) -> list:
"""
Extract the given key from the obj.
If the object is a list, extract it from all members of the list.
"""
if isinstance(obj, Mapping):
return obj.get(k)
elif isinstance(obj, MutableSequence):
cp = []
for item in obj:
cp.append(CWLGather.extract(item, k))
return cp
else:
return []
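    # Illustrative sketch, not part of the original module: extract() recurses
    # through nested scatter outputs, e.g.
    #
    #     CWLGather.extract({"out": 1}, "out")                    -> 1
    #     CWLGather.extract([{"out": 1}, {"out": 2}], "out")      -> [1, 2]
    #     CWLGather.extract([[{"out": 1}], [{"out": 2}]], "out")  -> [[1], [2]]
    #
    # so a nested_crossproduct scatter keeps its nesting in the gathered arrays.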
def run(self, file_store: AbstractFileStore) -> Dict[str, Any]:
"""Gather all the outputs of the scatter."""
outobj = {}
def sn(n):
if isinstance(n, Mapping):
return shortname(n["id"])
if isinstance(n, str):
return shortname(n)
for k in [sn(i) for i in self.step.tool["out"]]:
outobj[k] = self.extract(self.outputs, k)
return outobj
class SelfJob(Job):
"""Fake job object to facilitate implementation of CWLWorkflow.run()."""
def __init__(self, j: "CWLWorkflow", v: dict):
"""Record the workflow and dictionary."""
        super(SelfJob, self).__init__(cores=1, memory=1024 * 1024, disk=0)
self.j = j
self.v = v
def rv(self, *path) -> Any:
"""Return our properties dictionary."""
return self.v
def addChild(self, c: str) -> Any:
"""Add a child to our workflow."""
return self.j.addChild(c)
def hasChild(self, c: str) -> Any:
"""Check if the given child is in our workflow."""
return self.j.hasChild(c)
ProcessType = TypeVar(
"ProcessType",
ToilCommandLineTool,
cwltool.workflow.WorkflowStep,
cwltool.workflow.Workflow,
cwltool.command_line_tool.CommandLineTool,
cwltool.command_line_tool.ExpressionTool,
Process,
)
def remove_pickle_problems(obj: ProcessType) -> ProcessType:
    """Doc_loader does not pickle correctly, causing Toil errors; remove it from objects."""
if hasattr(obj, "doc_loader"):
obj.doc_loader = None
if isinstance(obj, cwltool.workflow.WorkflowStep):
obj.embedded_tool = remove_pickle_problems(obj.embedded_tool)
elif isinstance(obj, cwltool.workflow.Workflow):
obj.steps = [remove_pickle_problems(s) for s in obj.steps]
return obj
class CWLWorkflow(Job):
"""
Toil Job to convert a CWL workflow graph into a Toil job graph.
The Toil job graph will include the appropriate dependencies.
"""
def __init__(
self,
cwlwf: cwltool.workflow.Workflow,
cwljob: dict,
runtime_context: cwltool.context.RuntimeContext,
conditional: Union[Conditional, None] = None,
):
"""Gather our context for later execution."""
        super(CWLWorkflow, self).__init__(cores=1, memory=100 * 1024 * 1024, disk=0)
self.cwlwf = cwlwf
self.cwljob = cwljob
self.runtime_context = runtime_context
self.cwlwf = remove_pickle_problems(self.cwlwf)
self.conditional = conditional or Conditional()
def run(self, file_store: AbstractFileStore):
"""Convert a CWL Workflow graph into a Toil job graph."""
cwljob = resolve_dict_w_promises(self.cwljob, file_store)
if self.conditional.is_false(cwljob):
return self.conditional.skipped_outputs()
# `promises` dict
# from: each parameter (workflow input or step output)
        #   that may be used as a "source" for a step input or workflow
        #   output parameter
# to: the job that will produce that value.
promises: Dict[str, Job] = {}
# `jobs` dict from step id to job that implements that step.
jobs = {}
for inp in self.cwlwf.tool["inputs"]:
promises[inp["id"]] = SelfJob(self, cwljob)
all_outputs_fulfilled = False
while not all_outputs_fulfilled:
# Iteratively go over the workflow steps, scheduling jobs as their
# dependencies can be fulfilled by upstream workflow inputs or
# step outputs. Loop exits when the workflow outputs
# are satisfied.
all_outputs_fulfilled = True
for step in self.cwlwf.steps:
if step.tool["id"] not in jobs:
                    stepinputs_fulfilled = True
                    for inp in step.tool["inputs"]:
                        for s in aslist(inp.get("source", [])):
                            if s not in promises:
                                stepinputs_fulfilled = False
                    if stepinputs_fulfilled:
jobobj = {}
for inp in step.tool["inputs"]:
key = shortname(inp["id"])
if "source" in inp:
jobobj[key] = ResolveSource(
name=f'{step.tool["id"]}/{key}',
input=inp,
source_key="source",
promises=promises,
)
if "default" in inp:
jobobj[key] = DefaultWithSource( # type: ignore
copy.copy(inp["default"]), jobobj.get(key)
)
if "valueFrom" in inp and "scatter" not in step.tool:
jobobj[key] = StepValueFrom( # type: ignore
inp["valueFrom"],
jobobj.get(key, JustAValue(None)),
self.cwlwf.requirements,
)
conditional = Conditional(
expression=step.tool.get("when"),
outputs=step.tool["out"],
requirements=self.cwlwf.requirements,
)
if "scatter" in step.tool:
wfjob = CWLScatter(
step,
UnresolvedDict(jobobj),
self.runtime_context,
conditional=conditional,
)
followOn = CWLGather(step, wfjob.rv())
wfjob.addFollowOn(followOn)
else:
wfjob, followOn = makeJob(
tool=step.embedded_tool,
jobobj=UnresolvedDict(jobobj),
runtime_context=self.runtime_context,
conditional=conditional,
)
jobs[step.tool["id"]] = followOn
connected = False
for inp in step.tool["inputs"]:
for s in aslist(inp.get("source", [])):
if (
isinstance(promises[s], (CWLJobWrapper, CWLGather))
and not promises[s].hasFollowOn(wfjob)
# promises[s] job has already added wfjob as a followOn prior
and not wfjob.hasPredecessor(promises[s])
):
promises[s].addFollowOn(wfjob)
connected = True
if not isinstance(
promises[s], (CWLJobWrapper, CWLGather)
) and not promises[s].hasChild(wfjob):
promises[s].addChild(wfjob)
connected = True
if not connected:
# Workflow step is default inputs only & isn't connected
# to other jobs, so add it as child of this workflow.
self.addChild(wfjob)
for out in step.tool["outputs"]:
promises[out["id"]] = followOn
for inp in step.tool["inputs"]:
for source in aslist(inp.get("source", [])):
if source not in promises:
all_outputs_fulfilled = False
# may need a test
for out in self.cwlwf.tool["outputs"]:
if "source" in out:
if out["source"] not in promises:
all_outputs_fulfilled = False
outobj = {}
for out in self.cwlwf.tool["outputs"]:
key = shortname(out["id"])
outobj[key] = ResolveSource(
name="Workflow output '%s'" % key,
input=out,
source_key="outputSource",
promises=promises,
)
return UnresolvedDict(outobj)
def visitSteps(
cmdline_tool: Process,
op: Any,
) -> None:
"""Iterate over a CWL Process object, running the op on each WorkflowStep."""
if isinstance(cmdline_tool, cwltool.workflow.Workflow):
for step in cmdline_tool.steps:
op(step.tool)
visitSteps(step.embedded_tool, op)
def rm_unprocessed_secondary_files(job_params: Any) -> None:
if isinstance(job_params, list):
for j in job_params:
rm_unprocessed_secondary_files(j)
if isinstance(job_params, dict) and "secondaryFiles" in job_params:
job_params["secondaryFiles"] = filtered_secondary_files(job_params)
def filtered_secondary_files(unfiltered_secondary_files: dict) -> list:
"""
Remove unprocessed secondary files.
Interpolated strings and optional inputs in secondary files were added to
CWL in version 1.1.
The CWL libraries we call do successfully resolve the interpolated strings,
but add the resolved fields to the list of unresolved fields so we remove
them here after the fact.
    We also remove any secondary files here that do not contain 'toilfs:',
    which means they were not successfully imported into the toil jobstore.
    The 'required' logic seems to be handled deeper in
    cwltool.builder.Builder(), which correctly determines which files should
    be imported. Therefore we remove the files here, and if such a file is
    SUPPOSED to exist, the appropriate "file does not exist" error is still
    raised, just a bit further down the track.
"""
intermediate_secondary_files = []
final_secondary_files = []
# remove secondary files still containing interpolated strings
for sf in unfiltered_secondary_files["secondaryFiles"]:
sf_bn = sf.get("basename", "")
sf_loc = sf.get("location", "")
if ("$(" not in sf_bn) and ("${" not in sf_bn):
if ("$(" not in sf_loc) and ("${" not in sf_loc):
intermediate_secondary_files.append(sf)
# remove secondary files that are not present in the filestore
# i.e. 'file://' only gets converted to 'toilfs:' upon a successful import
for sf in intermediate_secondary_files:
sf_loc = sf.get("location", "")
# directories aren't imported, so don't worry about them
if sf_loc.startswith("toilfs:") or sf.get("class", "") == "Directory":
final_secondary_files.append(sf)
return final_secondary_files
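# Illustrative sketch, not part of the original module: with a hypothetical
# input such as
#
#     {"secondaryFiles": [
#         {"basename": "ref.fa.fai", "location": "toilfs:abc123", "class": "File"},
#         {"basename": "$(inputs.ref.basename).fai", "location": "file:///tmp/ref.fa.fai", "class": "File"},
#         {"basename": "missing.fai", "location": "file:///tmp/missing.fai", "class": "File"},
#     ]}
#
# only the first entry survives: the second still contains an interpolated
# string, and the third was never imported (its location is not 'toilfs:').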
def determine_load_listing(tool: ToilCommandLineTool):
"""
Determine the directory.listing feature in CWL.
In CWL, any input directory can have a DIRECTORY_NAME.listing (where
DIRECTORY_NAME is any variable name) set to one of the following three
options:
no_listing: DIRECTORY_NAME.listing will be undefined.
e.g. inputs.DIRECTORY_NAME.listing == unspecified
shallow_listing: DIRECTORY_NAME.listing will return a list one level
deep of DIRECTORY_NAME's contents.
e.g. inputs.DIRECTORY_NAME.listing == [items in directory]
inputs.DIRECTORY_NAME.listing[0].listing == undefined
inputs.DIRECTORY_NAME.listing.length == # of items in directory
deep_listing: DIRECTORY_NAME.listing will return a list of the entire
contents of DIRECTORY_NAME.
e.g. inputs.DIRECTORY_NAME.listing == [items in directory]
inputs.DIRECTORY_NAME.listing[0].listing == [items
in subdirectory if it exists and is the first item listed]
inputs.DIRECTORY_NAME.listing.length == # of items in directory
See: https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingRequirement
https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingEnum
DIRECTORY_NAME.listing should be determined first from loadListing.
If that's not specified, from LoadListingRequirement.
Else, default to "no_listing" if unspecified.
:param tool: ToilCommandLineTool
:return str: One of 'no_listing', 'shallow_listing', or 'deep_listing'.
"""
load_listing_req, _ = tool.get_requirement("LoadListingRequirement")
load_listing_tool_req = (
load_listing_req.get("loadListing", "no_listing")
if load_listing_req
else "no_listing"
)
load_listing = tool.tool.get("loadListing", None) or load_listing_tool_req
listing_choices = ("no_listing", "shallow_listing", "deep_listing")
if load_listing not in listing_choices:
raise ValueError(
f'Unknown loadListing specified: "{load_listing}". Valid choices: {listing_choices}'
)
return load_listing
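# Illustrative sketch, not part of the original module: the precedence is
# tool-level 'loadListing' first, then LoadListingRequirement, then the
# 'no_listing' default, so roughly
#
#     tool.tool["loadListing"] == "deep_listing"            -> "deep_listing"
#     only LoadListingRequirement of "shallow_listing"      -> "shallow_listing"
#     neither present                                       -> "no_listing"
#
# and any other value raises ValueError.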
usage_message = "\n\n" + textwrap.dedent(
f"""
* All positional arguments [cwl, yml_or_json] must always be specified last for toil-cwl-runner.
Note: If you're trying to specify a jobstore, please use --jobStore.
Usage: toil-cwl-runner [options] example.cwl example-job.yaml
Example: toil-cwl-runner \\
--jobStore aws:us-west-2:jobstore \\
--realTimeLogging \\
--logInfo \\
example.cwl \\
example-job.yaml
"""[
1:
]
)
def main(args: Union[List[str], None] = None, stdout: TextIO = sys.stdout) -> int:
"""Run the main loop for toil-cwl-runner."""
# Remove cwltool logger's stream handler so it uses Toil's
cwllogger.removeHandler(defaultStreamHandler)
if args is None:
args = sys.argv[1:]
config = Config()
config.disableChaining = True
config.cwl = True
parser = argparse.ArgumentParser()
addOptions(parser, config)
parser.add_argument("cwltool", type=str)
parser.add_argument("cwljob", nargs=argparse.REMAINDER)
    # Will override the "jobStore" positional argument, enabling the user to
    # select a jobStore or fall back to the default logic below.
parser.add_argument("--jobStore", "--jobstore", dest="jobStore", type=str)
parser.add_argument("--not-strict", action="store_true")
parser.add_argument(
"--enable-dev",
action="store_true",
help="Enable loading and running development versions of CWL",
)
parser.add_argument("--quiet", dest="logLevel", action="store_const", const="ERROR")
parser.add_argument("--basedir", type=str) # TODO: Might be hard-coded?
parser.add_argument("--outdir", type=str, default=os.getcwd())
parser.add_argument("--version", action="version", version=baseVersion)
dockergroup = parser.add_mutually_exclusive_group()
dockergroup.add_argument(
"--user-space-docker-cmd",
help="(Linux/OS X only) Specify a user space docker command (like "
"udocker or dx-docker) that will be used to call 'pull' and 'run'",
)
dockergroup.add_argument(
"--singularity",
action="store_true",
default=False,
help="[experimental] Use Singularity runtime for running containers. "
"Requires Singularity v2.6.1+ and Linux with kernel version v3.18+ or "
"with overlayfs support backported.",
)
dockergroup.add_argument(
"--no-container",
action="store_true",
help="Do not execute jobs in a "
"Docker container, even when `DockerRequirement` "
"is specified under `hints`.",
)
dockergroup.add_argument(
"--leave-container",
action="store_false",
default=True,
help="Do not delete Docker container used by jobs after they exit",
dest="rm_container",
)
parser.add_argument(
"--preserve-environment",
type=str,
nargs="+",
help="Preserve specified environment variables when running"
" CommandLineTools",
metavar=("VAR1 VAR2"),
default=("PATH",),
dest="preserve_environment",
)
parser.add_argument(
"--preserve-entire-environment",
action="store_true",
        help="Preserve all environment variables when running CommandLineTools.",
default=False,
dest="preserve_entire_environment",
)
parser.add_argument(
"--destBucket",
type=str,
help="Specify a cloud bucket endpoint for output files.",
)
parser.add_argument("--beta-dependency-resolvers-configuration", default=None)
parser.add_argument("--beta-dependencies-directory", default=None)
parser.add_argument("--beta-use-biocontainers", default=None, action="store_true")
parser.add_argument("--beta-conda-dependencies", default=None, action="store_true")
parser.add_argument(
"--tmpdir-prefix",
type=Text,
help="Path prefix for temporary directories",
default="tmp",
)
parser.add_argument(
"--tmp-outdir-prefix",
type=Text,
help="Path prefix for intermediate output directories",
default="tmp",
)
parser.add_argument(
"--force-docker-pull",
action="store_true",
default=False,
dest="force_docker_pull",
help="Pull latest docker image even if it is locally present",
)
parser.add_argument(
"--no-match-user",
action="store_true",
default=False,
help="Disable passing the current uid to `docker run --user`",
)
parser.add_argument(
"--no-read-only",
action="store_true",
default=False,
help="Do not set root directory in the container as read-only",
)
parser.add_argument(
"--strict-memory-limit",
action="store_true",
help="When running with "
"software containers and the Docker engine, pass either the "
"calculated memory allocation from ResourceRequirements or the "
"default of 1 gigabyte to Docker's --memory option.",
)
parser.add_argument(
"--relax-path-checks",
action="store_true",
default=False,
help="Relax requirements on path names to permit "
"spaces and hash characters.",
dest="relax_path_checks",
)
parser.add_argument(
"--default-container",
help="Specify a default docker container that will be "
"used if the workflow fails to specify one.",
)
    provgroup = parser.add_argument_group(
        "Options for recording provenance information of the execution"
    )
provgroup.add_argument(
"--provenance",
help="Save provenance to specified folder as a "
"Research Object that captures and aggregates "
"workflow execution and data products.",
type=Text,
)
provgroup.add_argument(
"--enable-user-provenance",
default=False,
action="store_true",
help="Record user account info as part of provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--disable-user-provenance",
default=False,
action="store_false",
help="Do not record user account info in provenance.",
dest="user_provenance",
)
provgroup.add_argument(
"--enable-host-provenance",
default=False,
action="store_true",
help="Record host info as part of provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--disable-host-provenance",
default=False,
action="store_false",
help="Do not record host info in provenance.",
dest="host_provenance",
)
provgroup.add_argument(
"--orcid",
help="Record user ORCID identifier as part of "
"provenance, e.g. https://orcid.org/0000-0002-1825-0097 "
"or 0000-0002-1825-0097. Alternatively the environment variable "
"ORCID may be set.",
dest="orcid",
default=os.environ.get("ORCID", ""),
type=Text,
)
provgroup.add_argument(
"--full-name",
help="Record full name of user as part of provenance, "
"e.g. Josiah Carberry. You may need to use shell quotes to preserve "
"spaces. Alternatively the environment variable CWL_FULL_NAME may "
"be set.",
dest="cwl_full_name",
default=os.environ.get("CWL_FULL_NAME", ""),
type=Text,
)
# Problem: we want to keep our job store somewhere auto-generated based on
# our options, unless overridden by... an option. So we will need to parse
# options twice, because we need to feed the parser the job store.
    # Propose a local workdir, probably under /tmp.
    # mkdtemp actually creates the directory, but Toil requires that the
    # directory not exist (it is going to be our jobstore), so create it,
    # delete it, and let Toil create it again.
workdir = tempfile.mkdtemp()
os.rmdir(workdir)
# we use the workdir as the default jobStore:
options = parser.parse_args([workdir] + args)
# if tmpdir_prefix is not the default value, set workDir if unset, and move
# workdir and the job store under it
if options.tmpdir_prefix != "tmp":
workdir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
os.rmdir(workdir)
# Re-parse arguments with the new default jobstore under the temp dir.
# It still might be overridden by a --jobStore option
options = parser.parse_args([workdir] + args)
if options.workDir is None:
# We need to override workDir because by default Toil will pick
# somewhere under the system temp directory if unset, ignoring
# --tmpdir-prefix.
#
# If set, workDir needs to exist, so we directly use the prefix
options.workDir = cwltool.utils.create_tmp_dir(options.tmpdir_prefix)
if options.provisioner and not options.jobStore:
raise NoSuchJobStoreException(
"Please specify a jobstore with the --jobStore option when "
"specifying a provisioner."
)
if options.batchSystem == "kubernetes":
options.singularity = True
use_container = not options.no_container
if options.logLevel:
# Make sure cwltool uses Toil's log level.
# Applies only on the leader.
cwllogger.setLevel(options.logLevel.upper())
outdir = os.path.abspath(options.outdir)
tmp_outdir_prefix = os.path.abspath(options.tmp_outdir_prefix)
fileindex = dict() # type: ignore
existing = dict() # type: ignore
conf_file = getattr(options, "beta_dependency_resolvers_configuration", None)
use_conda_dependencies = getattr(options, "beta_conda_dependencies", None)
job_script_provider = None
if conf_file or use_conda_dependencies:
dependencies_configuration = DependenciesConfiguration(options)
job_script_provider = dependencies_configuration
options.default_container = None
runtime_context = cwltool.context.RuntimeContext(vars(options))
runtime_context.find_default_container = functools.partial(
find_default_container, options
)
runtime_context.workdir = workdir # type: ignore
runtime_context.move_outputs = "leave"
runtime_context.rm_tmpdir = False
loading_context = cwltool.context.LoadingContext(vars(options))
if options.provenance:
research_obj = cwltool.provenance.ResearchObject(
temp_prefix_ro=options.tmp_outdir_prefix,
orcid=options.orcid,
full_name=options.cwl_full_name,
fsaccess=runtime_context.make_fs_access(""),
)
runtime_context.research_obj = research_obj
with Toil(options) as toil:
if options.restart:
outobj = toil.restart()
else:
loading_context.hints = [
{
"class": "ResourceRequirement",
"coresMin": toil.config.defaultCores,
"ramMin": toil.config.defaultMemory / (2 ** 20),
"outdirMin": toil.config.defaultDisk / (2 ** 20),
"tmpdirMin": 0,
}
]
loading_context.construct_tool_object = toil_make_tool
loading_context.resolver = cwltool.resolver.tool_resolver
loading_context.strict = not options.not_strict
options.workflow = options.cwltool
options.job_order = options.cwljob
try:
uri, tool_file_uri = cwltool.load_tool.resolve_tool_uri(
options.cwltool,
loading_context.resolver,
loading_context.fetcher_constructor,
)
except schema_salad.exceptions.ValidationException:
print(
"\nYou may be getting this error because your arguments are incorrect or out of order."
+ usage_message,
file=sys.stderr,
)
raise
options.tool_help = None
options.debug = options.logLevel == "DEBUG"
job_order_object, options.basedir, jobloader = cwltool.main.load_job_order(
options,
sys.stdin,
loading_context.fetcher_constructor,
loading_context.overrides_list,
tool_file_uri,
)
loading_context, workflowobj, uri = cwltool.load_tool.fetch_document(
uri, loading_context
)
loading_context, uri = cwltool.load_tool.resolve_and_validate_document(
loading_context, workflowobj, uri
)
loading_context.overrides_list.extend(
cast(
List[CWLObjectType],
loading_context.metadata.get("cwltool:overrides", []),
)
)
document_loader = loading_context.loader
metadata = loading_context.metadata
processobj = document_loader.idx
if options.provenance and runtime_context.research_obj:
runtime_context.research_obj.packed_workflow(
cwltool.main.print_pack(loading_context, uri)
)
try:
tool = cwltool.load_tool.make_tool(uri, loading_context)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
runtime_context.secret_store = SecretStore()
try:
initialized_job_order = cwltool.main.init_job_order(
job_order_object,
options,
tool,
jobloader,
sys.stdout,
secret_store=runtime_context.secret_store,
)
except SystemExit as e:
if e.code == 2: # raised by argparse's parse_args() function
print(
"\nIf both a CWL file and an input object (YAML/JSON) file were "
"provided, this may be the argument order." + usage_message,
file=sys.stderr,
)
raise
fs_access = cwltool.stdfsaccess.StdFsAccess(options.basedir)
fill_in_defaults(tool.tool["inputs"], initialized_job_order, fs_access)
for inp in tool.tool["inputs"]:
def set_secondary(fileobj):
if isinstance(fileobj, Mapping) and fileobj.get("class") == "File":
if "secondaryFiles" not in fileobj:
# inits all secondary files with 'file://' schema
# later changed to 'toilfs:' when imported into the jobstore
fileobj["secondaryFiles"] = [
{
"location": cwltool.builder.substitute(
fileobj["location"], sf["pattern"]
),
"class": "File",
}
for sf in inp["secondaryFiles"]
]
if isinstance(fileobj, MutableSequence):
for entry in fileobj:
set_secondary(entry)
if shortname(inp["id"]) in initialized_job_order and inp.get(
"secondaryFiles"
):
set_secondary(initialized_job_order[shortname(inp["id"])])
runtime_context.use_container = use_container
runtime_context.tmp_outdir_prefix = os.path.realpath(tmp_outdir_prefix)
runtime_context.job_script_provider = job_script_provider
runtime_context.force_docker_pull = options.force_docker_pull
runtime_context.no_match_user = options.no_match_user
runtime_context.no_read_only = options.no_read_only
runtime_context.basedir = options.basedir
runtime_context.move_outputs = "move"
# We instantiate an early builder object here to populate indirect
# secondaryFile references using cwltool's library because we need
# to resolve them before toil imports them into the filestore.
# A second builder will be built in the job's run method when toil
# actually starts the cwl job.
builder = tool._init_job(initialized_job_order, runtime_context)
# make sure this doesn't add listing items; if shallow_listing is
# selected, it will discover dirs one deep and then again later on
# (producing 2+ deep listings instead of only 1)
builder.loadListing = "no_listing"
builder.bind_input(
tool.inputs_record_schema,
initialized_job_order,
discover_secondaryFiles=True,
)
def path_to_loc(obj):
if "location" not in obj and "path" in obj:
obj["location"] = obj["path"]
del obj["path"]
def import_files(inner_tool):
visit_class(inner_tool, ("File", "Directory"), path_to_loc)
visit_class(
inner_tool, ("File",), functools.partial(add_sizes, fs_access)
)
normalizeFilesDirs(inner_tool)
adjustFileObjs(
inner_tool,
functools.partial(
uploadFile,
toil.importFile,
fileindex,
existing,
skip_broken=True,
),
)
# files with the 'file://' uri are imported into the jobstore and
# changed to 'toilfs:'
import_files(initialized_job_order)
visitSteps(tool, import_files)
for job_name, job_params in initialized_job_order.items():
rm_unprocessed_secondary_files(job_params)
try:
wf1, _ = makeJob(
tool=tool,
jobobj={},
runtime_context=runtime_context,
conditional=None,
)
except cwltool.process.UnsupportedRequirement as err:
logging.error(err)
return 33
wf1.cwljob = initialized_job_order
outobj = toil.start(wf1)
outobj = resolve_dict_w_promises(outobj)
# Stage files. Specify destination bucket if specified in CLI
# options. If destination bucket not passed in,
# options.destBucket's value will be None.
toilStageFiles(toil, outobj, outdir, destBucket=options.destBucket)
if runtime_context.research_obj is not None:
runtime_context.research_obj.create_job(outobj, True)
def remove_at_id(doc):
if isinstance(doc, MutableMapping):
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
if isinstance(value, MutableSequence):
for entry in value:
                                    if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(outobj)
visit_class(
outobj,
("File",),
functools.partial(add_sizes, runtime_context.make_fs_access("")),
)
prov_dependencies = cwltool.main.prov_deps(
workflowobj, document_loader, uri
)
runtime_context.research_obj.generate_snapshot(prov_dependencies)
runtime_context.research_obj.close(options.provenance)
if not options.destBucket:
visit_class(
outobj,
("File",),
functools.partial(
compute_checksums, cwltool.stdfsaccess.StdFsAccess("")
),
)
visit_class(outobj, ("File",), MutationManager().unset_generation)
stdout.write(json.dumps(outobj, indent=4))
return 0
def find_default_container(
args: argparse.Namespace, builder: cwltool.builder.Builder
) -> str:
    """Find the default container by consulting a Toil.options object."""
if args.default_container:
return args.default_container
if args.beta_use_biocontainers:
return get_container_from_software_requirements(True, builder)
return None
|
the-stack_0_11886 | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import (
DUMMY_UNKWOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
require_scatter,
require_torch,
slow,
)
if is_torch_available():
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTableQuestionAnswering,
AutoModelForTokenClassification,
AutoModelWithLMHead,
BertConfig,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertModel,
FunnelBaseModel,
FunnelModel,
GPT2Config,
GPT2LMHeadModel,
RobertaForMaskedLM,
T5Config,
T5ForConditionalGeneration,
TapasConfig,
TapasForQuestionAnswering,
)
from transformers.models.auto.modeling_auto import (
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
MODEL_WITH_HEADS_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
)
from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpt2.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tapas import TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
@require_torch
class AutoModelTest(unittest.TestCase):
@slow
def test_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModel.from_pretrained(model_name)
model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertModel)
for value in loading_info.values():
self.assertEqual(len(value), 0)
@slow
def test_model_for_pretraining_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForPreTraining.from_pretrained(model_name)
model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForPreTraining)
# Only one value should not be initialized and in the missing keys.
missing_keys = loading_info.pop("missing_keys")
self.assertListEqual(["cls.predictions.decoder.bias"], missing_keys)
for key, value in loading_info.items():
self.assertEqual(len(value), 0)
@slow
def test_lmhead_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelWithLMHead.from_pretrained(model_name)
model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_causal_lm(self):
for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, GPT2Config)
model = AutoModelForCausalLM.from_pretrained(model_name)
model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, GPT2LMHeadModel)
@slow
def test_model_for_masked_lm(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForMaskedLM.from_pretrained(model_name)
model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForMaskedLM)
@slow
def test_model_for_encoder_decoder_lm(self):
for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, T5Config)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, T5ForConditionalGeneration)
@slow
def test_sequence_classification_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForSequenceClassification.from_pretrained(model_name)
model, loading_info = AutoModelForSequenceClassification.from_pretrained(
model_name, output_loading_info=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForSequenceClassification)
@slow
def test_question_answering_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForQuestionAnswering)
@slow
@require_scatter
def test_table_question_answering_model_from_pretrained(self):
for model_name in TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, TapasConfig)
model = AutoModelForTableQuestionAnswering.from_pretrained(model_name)
model, loading_info = AutoModelForTableQuestionAnswering.from_pretrained(
model_name, output_loading_info=True
)
self.assertIsNotNone(model)
self.assertIsInstance(model, TapasForQuestionAnswering)
@slow
def test_token_classification_model_from_pretrained(self):
for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
config = AutoConfig.from_pretrained(model_name)
self.assertIsNotNone(config)
self.assertIsInstance(config, BertConfig)
model = AutoModelForTokenClassification.from_pretrained(model_name)
model, loading_info = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
self.assertIsNotNone(model)
self.assertIsInstance(model, BertForTokenClassification)
def test_from_pretrained_identifier(self):
model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
self.assertIsInstance(model, BertForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_identifier_from_model_type(self):
model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER)
self.assertIsInstance(model, RobertaForMaskedLM)
self.assertEqual(model.num_parameters(), 14410)
self.assertEqual(model.num_parameters(only_trainable=True), 14410)
def test_from_pretrained_with_tuple_values(self):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
model = AutoModel.from_pretrained("sgugger/funnel-random-tiny")
self.assertIsInstance(model, FunnelModel)
config = copy.deepcopy(model.config)
config.architectures = ["FunnelBaseModel"]
model = AutoModel.from_config(config)
self.assertIsInstance(model, FunnelBaseModel)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
model = AutoModel.from_pretrained(tmp_dir)
self.assertIsInstance(model, FunnelBaseModel)
def test_parents_and_children_in_mappings(self):
        # Test that the children are placed before the parents in the mappings, as the `isinstance` check will be
        # triggered by the parents and would return the wrong configuration type when using auto models
mappings = (
MODEL_MAPPING,
MODEL_WITH_HEADS_MAPPING,
MODEL_FOR_PRETRAINING_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
MODEL_FOR_CAUSAL_LM_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
)
for mapping in mappings:
mapping = tuple(mapping.items())
for index, (child_config, child_model) in enumerate(mapping[1:]):
for parent_config, parent_model in mapping[: index + 1]:
assert not issubclass(
child_config, parent_config
), f"{child_config.__name__} is child of {parent_config.__name__}"
# Tuplify child_model and parent_model since some of them could be tuples.
if not isinstance(child_model, (list, tuple)):
child_model = (child_model,)
if not isinstance(parent_model, (list, tuple)):
parent_model = (parent_model,)
for child, parent in [(a, b) for a in child_model for b in parent_model]:
assert not issubclass(child, parent), f"{child.__name__} is child of {parent.__name__}"
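    # Illustrative sketch, not part of the original test file: the ordering
    # matters because the Auto* factories effectively do a first-match lookup,
    # roughly
    #
    #     for config_class, model_class in MODEL_MAPPING.items():
    #         if isinstance(config, config_class):
    #             return model_class
    #
    # so if a parent config class appeared before one of its subclasses, the
    # subclass entry could never be selected. The loop above is a
    # simplification, not a copy of the library code.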
|
the-stack_0_11887 | """
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota
See: LICENSE.md for complete license details
Author: Chris Regan
Analysis for Thor RTSM
"""
#%%
# Import Libraries
import numpy as np
import matplotlib.pyplot as plt
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
from Core import Loader
from Core import OpenData
# plt.rcParams.update({
# "text.usetex": True,
# "font.family": "serif",
# "font.serif": ["Palatino"],
# "font.size": 10
# })
# Constants
hz2rps = 2 * np.pi
rps2hz = 1 / hz2rps
#%% File Lists
import os.path as path
# pathBase = path.join('/home', 'rega0051', 'FlightArchive', 'Thor')
pathBase = path.join('G:', 'Shared drives', 'UAVLab', 'Flight Data', 'Thor')
fileList = {}
flt = 'FLT126'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
flt = 'FLT127'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
flt = 'FLT128'
fileList[flt] = {}
fileList[flt]['log'] = path.join(pathBase, 'Thor' + flt, 'Thor' + flt + '.h5')
fileList[flt]['config'] = path.join(pathBase, 'Thor' + flt, 'thor.json')
fileList[flt]['def'] = path.join(pathBase, 'Thor' + flt, 'thor_def.json')
#%%
from Core import FreqTrans
rtsmSegList = [
# {'flt': 'FLT126', 'seg': ('time_us', [875171956 , 887171956], 'FLT126 - RTSM - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT126', 'seg': ('time_us', [829130591 , 841130591], 'FLT126 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT127', 'seg': ('time_us', [641655909 , 653655909], 'FLT127 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [700263746 , 712263746 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Interesting Roll Margin vs. Uncertainty
# {'flt': 'FLT128', 'seg': ('time_us', [831753831 , 843753831 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'},
# {'flt': 'FLT128', 'seg': ('time_us', [ 959859721 , 971859721 ], 'FLT128 - RTSM Route - Nominal Gain, 4 deg amp'), 'color': 'k'}, # Not good
# {'flt': 'FLT126', 'seg': ('time_us', [928833763 , 940833763], 'FLT126 - RTSM Large - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT127', 'seg': ('time_us', [698755386 , 707255278], 'FLT127 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [779830919 , 791830919 ], 'FLT128 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
# {'flt': 'FLT128', 'seg': ('time_us', [900237086 , 912237086 ], 'FLT128 - RTSM Large Route - Nominal Gain, 8 deg amp'), 'color': 'r'},
#
# {'flt': 'FLT126', 'seg': ('time_us', [902952886 , 924952886], 'FLT126 - RTSM Long - Nominal Gain, 4 deg amp'), 'color': 'b'},
# {'flt': 'FLT127', 'seg': ('time_us', [657015836 , 689015836], 'FLT127 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [714385469 , 746385469 ], 'FLT128 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'},
{'flt': 'FLT128', 'seg': ('time_us', [847254621 , 879254621 ], 'FLT128 - RTSM Long Route - Nominal Gain, 4 deg amp'), 'color': 'b'}, # Best
# {'flt': 'FLT127', 'seg': ('time_us', [1209355236 , 1221535868], 'FLT127 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'}, # Yaw controller in-op??
# {'flt': 'FLT128', 'seg': ('time_us', [794251787 , 826251787 ], 'FLT128 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'},
# {'flt': 'FLT128', 'seg': ('time_us', [921438015 , 953438015 ], 'FLT128 - RTSM LongLarge Route - Nominal Gain, 8 deg amp'), 'color': 'm'},
# {'flt': 'FLT126', 'seg': ('time_us', [981115495 , 993115495], 'FLT126 - RTSM - High Gain, 4 deg amp')},
# {'flt': 'FLT126', 'seg': ('time_us', [689907125 , 711907125], 'FLT126 - RTSM Long - High Gain, 4 deg amp')},
# {'flt': 'FLT126', 'seg': ('time_us', [728048050 , 740048050], 'FLT126 - RTSM Large - High Gain, 8 deg amp')},
#
]
oDataSegs = []
for rtsmSeg in rtsmSegList:
fltNum = rtsmSeg['flt']
fileLog = fileList[fltNum]['log']
fileConfig = fileList[fltNum]['config']
# Load
h5Data = Loader.Load_h5(fileLog) # RAPTRS log data as hdf5
sysConfig = Loader.JsonRead(fileConfig)
oData = Loader.OpenData_RAPTRS(h5Data, sysConfig)
oData['cmdRoll_FF'] = h5Data['Control']['cmdRoll_pidFF']
oData['cmdRoll_FB'] = h5Data['Control']['cmdRoll_pidFB']
oData['cmdPitch_FF'] = h5Data['Control']['cmdPitch_pidFF']
oData['cmdPitch_FB'] = h5Data['Control']['cmdPitch_pidFB']
oData['cmdYaw_FF'] = h5Data['Control']['refPsi_rad']
oData['cmdYaw_FB'] = h5Data['Control']['cmdYaw_damp_rps']
# Segments
rtsmSeg['seg'][1][0] += 1e6
rtsmSeg['seg'][1][1] += -1e6 + 50e3
oDataSegs.append(OpenData.Segment(oData, rtsmSeg['seg']))
#%%
sigExcList = ['cmdRoll_rps', 'cmdPitch_rps', 'cmdYaw_rps']
sigFbList = ['cmdRoll_FB', 'cmdPitch_FB', 'cmdYaw_FB']
sigFfList = ['cmdRoll_FF', 'cmdPitch_FF', 'cmdYaw_FF']
#sigSensList = ['wB_I_rps', 'cmdPitch_FF', 'cmdYaw_FF']
freqExc_rps = []
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_1']['Frequency']))
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_2']['Frequency']))
freqExc_rps.append( np.array(sysConfig['Excitation']['OMS_RTSM_3']['Frequency']))
vCmdList = []
vExcList = []
vFbList = []
vFfList = []
ySensList = []
for iSeg, seg in enumerate(oDataSegs):
vCmd = np.zeros((len(sigExcList), len(seg['time_s'])))
vExc = np.zeros((len(sigExcList), len(seg['time_s'])))
vFb = np.zeros((len(sigExcList), len(seg['time_s'])))
vFf = np.zeros((len(sigExcList), len(seg['time_s'])))
ySens = np.zeros((len(sigExcList), len(seg['time_s'])))
for iSig, sigExc in enumerate(sigExcList):
sigFb = sigFbList[iSig]
sigFf = sigFfList[iSig]
vCmd[iSig] = seg['Control'][sigExc]
vExc[iSig] = seg['Excitation'][sigExc]
# vFb[iSig] = seg[sigFb]
vFb[iSig][1:-1] = seg[sigFb][0:-2] # Shift the time of the output into next frame
vFf[iSig] = seg[sigFf]
ySens[iSig] = seg['wB_I_rps'][iSig]
vCmdList.append(vCmd)
vExcList.append(vExc)
vFbList.append(vFb)
vFfList.append(vFf)
ySensList.append(ySens)
plt.plot(oDataSegs[iSeg]['time_s'], oDataSegs[iSeg]['vIas_mps'])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][0])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][1])
plt.plot(oDataSegs[iSeg]['time_s'], vExcList[iSeg][2])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][0])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][1])
plt.plot(oDataSegs[iSeg]['time_s'], vFbList[iSeg][2])
#%% Estimate the frequency response function
# Define the excitation frequencies
freqRate_hz = 50
freqRate_rps = freqRate_hz * hz2rps
optSpec = FreqTrans.OptSpect(dftType = 'czt', freqRate = freqRate_rps, smooth = ('box', 3), winType = ('tukey', 0.2), detrendType = 'Linear')
# Excited Frequencies per input channel
optSpec.freq = np.asarray(freqExc_rps)
# FRF Estimate
LiEstNomList = []
LiEstCohList = []
svLiEstNomList = []
for iSeg, seg in enumerate(oDataSegs):
freq_rps, Teb, Ceb, Pee, Pbb, Peb = FreqTrans.FreqRespFuncEst(vExcList[iSeg], vExcList[iSeg] + vFbList[iSeg], optSpec)
# _ , Tev, Cev, _ , Pvv, Pev = FreqTrans.FreqRespFuncEst(vExcList[iSeg], vCmdList[iSeg], optSpec)
freq_hz = freq_rps * rps2hz
I3 = np.repeat([np.eye(3)], Teb.shape[-1], axis=0).T
SaEstNom = Teb # Sa = I + Teb
SaEstCoh = Ceb # Cxy = np.abs(Sxy)**2 / (Sxx * Syy) = (np.abs(Sxy) / Sxx) * (np.abs(Sxy) / Syy)
# T = TNom = (uCtrl + uExc) / uExc - uNull / uExc
# Li = inv(TNom + TUnc) - I = LiEstNom + LiEstUnc
# LiEstNom = -I + TNom^-1
# LiEstUnc = -(I + TNom^-1 * TUnc)^-1 * TNom^-1 * TUnc * TNom^-1
LiEstNom = np.zeros_like(SaEstNom, dtype = complex)
LiEstCoh = np.zeros_like(SaEstCoh)
inv = np.linalg.inv
for i in range(SaEstNom.shape[-1]):
SaEstNomElem = SaEstNom[...,i]
SaEstNomInvElem = inv(SaEstNomElem)
LiEstNom[...,i] = -np.eye(3) + SaEstNomInvElem
# LiEstCoh[...,i] = -np.eye(3) + inv(SaEstCoh[...,i])
LiEstCoh[...,i] = SaEstCoh[...,i]
LiEstNomList.append( LiEstNom )
LiEstCohList.append( LiEstCoh )
svLiEstNomList_seg = FreqTrans.Sigma( LiEstNom ) # Singular Value Decomp
svLiEstNomList.append(svLiEstNomList_seg)
T_InputNames = sigExcList
T_OutputNames = sigFbList
# Compute Gain, Phase, Crit Distance
gainLiEstNomList_mag = []
phaseLiEstNomList_deg = []
rCritLiEstNomList_mag = []
for iSeg in range(0, len(oDataSegs)):
gain_mag, phase_deg = FreqTrans.GainPhase(LiEstNomList[iSeg], magUnit = 'mag', phaseUnit = 'deg', unwrap = True)
gainLiEstNomList_mag.append(gain_mag)
phaseLiEstNomList_deg.append(phase_deg)
# rCritLiEstNom_mag, _, _ = FreqTrans.DistCrit(LiEstNomList[iSeg], typeUnc = 'ellipse')
rCritLiEstNom_mag, _, _ = FreqTrans.DistCritCirc(LiEstNomList[iSeg])
rCritLiEstNomList_mag.append(rCritLiEstNom_mag)
#%% Sigma Plot
fig = None
for iSeg in range(0, len(oDataSegs)):
Cmin = np.min(np.min(LiEstCohList[iSeg], axis = 0), axis = 0)
sNomMin = np.min(svLiEstNomList[iSeg], axis=0)
fig = FreqTrans.PlotSigma(freq_hz[0], svLiEstNomList[iSeg], coher_nd = Cmin, fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotSigma(freq_hz[0], 0.4 * np.ones_like(freq_hz[0]), color = 'r', linestyle = '--', fig = fig)
ax = fig.get_axes()
ax[0].set_xlim(0, 10)
# ax[0].set_ylim(0, 1)
#%% Disk Margin Plots
inPlot = sigExcList # Elements of sigExcList
outPlot = sigFbList # Elements of sigFbList
if False:
for iOut, outName in enumerate(outPlot):
for iIn, inName in enumerate(inPlot):
fig = None
for iSeg in range(0, len(oDataSegs)):
fig = FreqTrans.PlotSigma(freq_hz[0], rCritLiEstNomList_mag[iSeg][iOut, iIn], coher_nd = LiEstCohList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotSigma(freq_hz[0], 0.4 * np.ones_like(freq_hz[0]), fig = fig, color = 'r', linestyle = '--')
fig.suptitle(inName + ' to ' + outName, size=20)
ax = fig.get_axes()
# ax[0].set_ylim(0, 2)
#%% Nyquist Plots
if False:
for iOut, outName in enumerate(outPlot):
for iIn, inName in enumerate(inPlot):
fig = None
for iSeg in range(0, len(oDataSegs)):
fig = FreqTrans.PlotNyquist(LiEstNomList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], label = oDataSegs[iSeg]['Desc'])
fig = FreqTrans.PlotNyquist(np.asarray([-1+ 0j]), TUnc = np.asarray([0.4 + 0.4j]), fig = fig, fmt = '*r', label = 'Critical Region')
fig.suptitle(inName + ' to ' + outName, size=20)
ax = fig.get_axes()
ax[0].set_xlim(-3, 1)
ax[0].set_ylim(-2, 2)
#%% Bode Plots
if False:
for iOut, outName in enumerate(outPlot):
for iIn, inName in enumerate(inPlot):
fig = None
for iSeg in range(0, len(oDataSegs)):
fig = FreqTrans.PlotBode(freq_hz[0], gainLiEstNomList_mag[iSeg][iOut, iIn], phaseLiEstNomList_deg[iSeg][iOut, iIn], LiEstCohList[iSeg][iOut, iIn], fig = fig, color = rtsmSegList[iSeg]['color'], linestyle = '-', label = oDataSegs[iSeg]['Desc'])
fig.suptitle(inName + ' to ' + outName, size=20)
|
the-stack_0_11890 | #!/usr/bin/env python
"""
For more information on this API, please visit:
https://duo.com/docs/adminapi
-
Script Dependencies:
requests
Dependency Installation:
$ pip install -r requirements.txt
System Requirements:
- Duo MFA, Duo Access or Duo Beyond account with administrator privileges.
- Duo Admin API enabled
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import json, base64, email.utils, hmac, hashlib, urllib3, urllib.parse
import requests
import pprint
import config
urllib3.disable_warnings()
# Imported API configuration variables
API_HOSTNAME = config.DUO_API_HOSTNAME
S_KEY = config.DUO_API_SECRET_KEY
I_KEY = config.DUO_API_INTEGRATION_KEY
# Script specific variables
METHOD = 'POST'
API_PATH = '/admin/v1/integrations'
NAME = 'Test Integration'
TYPE = 'authapi'
PARAMS = {
'name': NAME,
'type': TYPE
}
# Request signing helper function
def sign(method=METHOD,
host=API_HOSTNAME,
path=API_PATH,
params=PARAMS,
skey=S_KEY,
ikey=I_KEY):
"""
Return HTTP Basic Authentication ("Authorization" and "Date") headers.
method, host, path: strings from request
params: dict of request parameters
skey: secret key
ikey: integration key
"""
# create canonical string
now = email.utils.formatdate()
canon = [now, method.upper(), host.lower(), path]
args = []
for key in sorted(params.keys()):
val = params[key]
if isinstance(val, str):
val = val.encode("utf-8")
args.append(
'%s=%s' % (urllib.parse.quote(key, '~'), urllib.parse.quote(val, '~')))
canon.append('&'.join(args))
canon = '\n'.join(canon)
print(canon)
# sign canonical string
sig = hmac.new(skey.encode('utf-8'), canon.encode('utf-8'), hashlib.sha1)
auth = '%s:%s' % (ikey, sig.hexdigest())
print(auth)
encoded_auth = base64.b64encode(auth.encode('utf-8'))
# return headers
return {'Date': now, 'Authorization': 'Basic %s' % str(encoded_auth, 'UTF-8')}
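# For reference, the canonical string signed above has one value per line, in
# this order (the date and hostname below are illustrative placeholders, not
# real values):
#
#   Tue, 21 Aug 2012 17:29:18 -0000
#   POST
#   api-xxxxxxxx.duosecurity.com
#   /admin/v1/integrations
#   name=Test%20Integration&type=authapi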
if __name__ == "__main__":
url = "https://{}{}".format(API_HOSTNAME, API_PATH)
payload = PARAMS
request_headers = sign()
request_headers['Content-Type'] = 'application/x-www-form-urlencoded'
integration = requests.request(METHOD, url, data=payload, headers=request_headers, verify=False)
pprint.pprint(json.loads(integration.content)) |
the-stack_0_11892 |
# Raise notes by a semitone (sharpen)
def sharp_note(mynote, sharped):
'''
    Raise a single note by a semitone.
'''
if sharped:
if mynote == '1':
return '2'
elif mynote == '2':
return '3'
elif mynote == '4':
return '5'
elif mynote == '5':
return '6'
elif mynote == '6':
return '7'
else:
return '(!)'
else:
if mynote == '3':
return '4'
elif mynote == '7':
return '[1]'
elif mynote == '1' or mynote == '2' or mynote == '4' or mynote == '5' or mynote == '6':
return '#' + mynote
else:
return mynote
def sharp_tune(old_tune):
'''
    Raise every note in the tune by a semitone.
'''
    old_tune = str(old_tune)
sharping = False
new_tune = ''
for i in old_tune:
if i == '#':
sharping = True
else:
new_tune = new_tune + sharp_note(i, sharping)
sharping = False
return new_tune
def sharp_tune_more(old_tune, times):
'''
    Raise the tune by a semitone multiple times.
'''
for _ in range(times):
old_tune = sharp_tune(old_tune)
return old_tune
# print(sharp_tune(input('input:')))
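# A minimal self-check (assumes the jianpu-style numbered notation used above;
# the expected values follow directly from the rules encoded in sharp_note()):
if __name__ == '__main__':
    assert sharp_tune('135') == '#14#5'       # 1 and 5 gain '#', 3 resolves to 4
    assert sharp_tune('#1') == '2'            # an already-sharpened 1 becomes 2
    assert sharp_tune_more('3', 2) == '#4'    # two successive semitone raises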
|
the-stack_0_11894 | import collections
Set = set
try:
from collections import OrderedDict
except ImportError:
    # Pure-Python fallback (the Python 2.7 OrderedDict recipe). It needs
    # get_ident for __repr__ and the dict-view ABCs for the view* methods.
    try:
        from thread import get_ident as _get_ident  # Python 2
    except ImportError:
        from _thread import get_ident as _get_ident  # Python 3
    from collections import KeysView, ValuesView, ItemsView
    class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.values():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
KEY, PREV, NEXT = range(3)
class OrderedSet(MutableSet):
"""
From: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[NEXT] = next
next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
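# A small usage sketch of the recipe above (module self-test, not part of the API):
if __name__ == '__main__':
    s = OrderedSet('abracadaba')   # keeps first-seen order: a, b, r, c, d
    t = OrderedSet('simsalabim')   # s, i, m, a, l, b
    assert list(s | t) == ['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l']
    assert list(s & t) == ['a', 'b']
    assert list(s - t) == ['r', 'c', 'd']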
|
the-stack_0_11897 | from typing import Any, Dict
from ..base import BaseDistiller, DistillationResult
class JsonDistiller(BaseDistiller):
def __call__(
self,
source: Dict[str, Any],
context: Dict[str, Any] = None,
raise_validation_error: bool = False,
) -> DistillationResult:
raise NotImplementedError
|
the-stack_0_11898 | import abc
from typing import TYPE_CHECKING
import jsonpickle
from .output.json_writer import ADD_VARIABLE, CHANGE_VARIABLE, EXECUTE_FRAME, NEW_FRAME, REMOVE_VARIABLE
if TYPE_CHECKING:
from .debugger import Debugger
class Replayer(abc.ABC):
def __init__(self: "Debugger"):
# Propagate initialization to other mixins
super().__init__()
def replay_events(self: "Debugger", events):
for event in events:
evt_type = event["event"]
if evt_type == NEW_FRAME:
self.out.write_cur_frame(event["frame_info"], event["output"])
elif evt_type == EXECUTE_FRAME:
frame_info = event["frame_info"]
exec_time = event["exec_time"]
self.out.write_frame_exec(frame_info, exec_time, event["exec_times"])
# Replay changes to frame_exec_times
if frame_info in self.frame_exec_times:
self.frame_exec_times[frame_info].append(exec_time)
else:
self.frame_exec_times[frame_info] = [exec_time]
elif evt_type == ADD_VARIABLE:
self.out.write_add(
event["var_name"], event["value"], event["history"], action=event["action"], plural=event["plural"],
)
elif evt_type == CHANGE_VARIABLE:
self.out.write_change(
event["var_name"],
event["value_before"],
event["value_after"],
event["history"],
action=event["action"],
)
elif evt_type == REMOVE_VARIABLE:
self.out.write_remove(event["var_name"], event["value"], event["history"], action=event["action"])
else:
raise ValueError(f"Unrecognized JSON event '{evt_type}'")
def replay_summary(self: "Debugger", data):
self.vars.update(data["var_history"])
self.out.write_variable_summary(self.vars)
if self.profiler_output:
self.out.write_profiler_summary(self.frame_exec_times)
self.out.write_time_summary(data["exec_start_time"], data["exec_stop_time"])
def replay(self: "Debugger", json_path):
with open(json_path, "r") as f:
data = jsonpickle.loads(f.read())
self.replay_events(data["events"])
self.replay_summary(data)
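# For reference, replay() expects a jsonpickle file shaped roughly like the
# sketch below (field names follow what replay_events()/replay_summary() read
# above; the concrete values are illustrative):
#
# {
#     "events": [
#         {"event": NEW_FRAME, "frame_info": ..., "output": ...},
#         {"event": EXECUTE_FRAME, "frame_info": ..., "exec_time": ..., "exec_times": ...},
#         {"event": ADD_VARIABLE, "var_name": ..., "value": ..., "history": ..., "action": ..., "plural": ...},
#     ],
#     "var_history": {...},
#     "exec_start_time": ...,
#     "exec_stop_time": ...,
# }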
|
the-stack_0_11899 | from gym.spaces import Box
from ray.rllib.agents.dqn.distributional_q_tf_model import \
DistributionalQTFModel
from ray.rllib.agents.dqn.dqn_torch_model import \
DQNTorchModel
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.numpy import LARGE_INTEGER
tf = try_import_tf()
torch, nn = try_import_torch()
class ParametricActionsModel(DistributionalQTFModel):
"""Parametric action model that handles the dot product and masking.
This assumes the outputs are logits for a single Categorical action dist.
Getting this to work with a more complex output (e.g., if the action space
is a tuple of several distributions) is also possible but left as an
exercise to the reader.
"""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
super(ParametricActionsModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name, **kw)
self.action_embed_model = FullyConnectedNetwork(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_action_embed")
self.register_variables(self.action_embed_model.variables())
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
avail_actions = input_dict["obs"]["avail_actions"]
action_mask = input_dict["obs"]["action_mask"]
# Compute the predicted action embedding
action_embed, _ = self.action_embed_model({
"obs": input_dict["obs"]["cart"]
})
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = tf.expand_dims(action_embed, 1)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = tf.reduce_sum(avail_actions * intent_vector, axis=2)
# Mask out invalid actions (use tf.float32.min for stability)
inf_mask = tf.maximum(tf.log(action_mask), tf.float32.min)
return action_logits + inf_mask, state
def value_function(self):
return self.action_embed_model.value_function()
class TorchParametricActionsModel(DQNTorchModel, nn.Module):
"""PyTorch version of above ParametricActionsModel."""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
nn.Module.__init__(self)
DQNTorchModel.__init__(self, obs_space, action_space, num_outputs,
model_config, name, **kw)
self.action_embed_model = TorchFC(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_action_embed")
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
avail_actions = input_dict["obs"]["avail_actions"]
action_mask = input_dict["obs"]["action_mask"]
# Compute the predicted action embedding
action_embed, _ = self.action_embed_model({
"obs": input_dict["obs"]["cart"]
})
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = torch.unsqueeze(action_embed, 1)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = torch.sum(avail_actions * intent_vector, dim=2)
# Mask out invalid actions (use -LARGE_INTEGER to tag invalid).
# These are then recognized by the EpsilonGreedy exploration component
# as invalid actions that are not to be chosen.
inf_mask = torch.clamp(
torch.log(action_mask), -float(LARGE_INTEGER), float("inf"))
return action_logits + inf_mask, state
def value_function(self):
return self.action_embed_model.value_function()
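# Sketch of the dict observation space both models above expect. The key names
# ("action_mask", "avail_actions", "cart") follow what forward() reads; the
# sizes below are illustrative assumptions, not requirements.
def _example_obs_space(max_avail_actions=8, action_embed_size=2, true_obs_shape=(4, )):
    from gym.spaces import Dict
    return Dict({
        "action_mask": Box(0, 1, shape=(max_avail_actions, )),
        "avail_actions": Box(-1, 1, shape=(max_avail_actions, action_embed_size)),
        "cart": Box(-1, 1, shape=true_obs_shape),
    })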
|
the-stack_0_11900 | import argparse
import os
import json
import joblib
from azureml.core import Run
from training.train_helper import split_data, train_model, get_model_metrics
root_dir = os.path.abspath(os.curdir)
print(f"Root directory is {root_dir}")
print(f"Listing files in root directory {os.listdir(root_dir)}")
print("Train the model")
# Giving a description of this file when invoked on command line like python clean.py -h
parser = argparse.ArgumentParser("model training")
parser.add_argument("--model_name", type=str, help="Name of the model", default="titanic_classifier_model.pkl")
parser.add_argument("--output_model", type=str, help="Model Output directory")
# Parse the arguments
args = parser.parse_args()
print(f"Argument 1 (Name of the model):, {args.model_name}")
print(f"Argument 2 (Output Directory of model):, {args.output_model}")
# Output path of this step
model_name = args.model_name
step_output_path = args.output_model
# Load the training parameters from the parameters file
with open("parameters.json") as f:
pars = json.load(f)
try:
train_args = pars["training"]
except KeyError:
print("Could not load training values from file")
train_args = {}
# Get the run context
run = Run.get_context()
# Get the feature eng data
feateng_data = run.input_datasets["feateng_data"]
feateng_df = feateng_data.to_pandas_dataframe()
# Tagging details to the run
run.input_datasets["training_data"] = feateng_data
run.parent.tag("dataset_id", value=feateng_data.id)
# Split the data into train and test
X_cols = ['Passenger_Class', 'Sex', 'SibSp', 'Parch', 'Fare']
target_col = "Survived"
data = split_data(feateng_df, X_cols, target_col)
# Train the model
model = train_model(data, train_args)
# Evaluate and log the metrics returned from the train function
metrics = get_model_metrics(model, data)
for (k, v) in metrics.items():
run.log(k, v)
run.parent.log(k, v)
# Pass model file to next step
os.makedirs(step_output_path, exist_ok=True)
model_output_path = os.path.join(step_output_path, model_name)
joblib.dump(value=model, filename=model_output_path)
# Also upload model file to run outputs for history
os.makedirs('outputs', exist_ok=True)
output_path = os.path.join('outputs', model_name)
joblib.dump(value=model, filename=output_path)
|
the-stack_0_11901 | from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import _iter
expected_verilog = """
module blinkled
(
input CLK,
input RST,
output reg [8-1:0] LED
);
reg [32-1:0] count;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
if(count == 1023) begin
count <= 0;
end else begin
count <= count + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 1;
end else begin
if(count == 1023) begin
LED[0] <= LED[7];
LED[1] <= LED[0];
LED[2] <= LED[1];
LED[3] <= LED[2];
LED[4] <= LED[3];
LED[5] <= LED[4];
LED[6] <= LED[5];
LED[7] <= LED[6];
end
end
end
endmodule
"""
def test():
veriloggen.reset()
test_module = _iter.mkLed()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
|
the-stack_0_11903 | import jesse.helpers as jh
from jesse.enums import sides, order_statuses
from jesse.models import Order
from jesse.enums import order_types
from .utils import set_up, single_route_backtest
def test_cancel_order():
set_up()
order = Order({
'id': jh.generate_unique_id(),
'exchange': 'Sandbox',
'symbol': 'BTC-USDT',
'type': order_types.LIMIT,
'price': 129.33,
'qty': 10.2041,
'side': sides.BUY,
'status': order_statuses.ACTIVE,
'created_at': jh.now_to_timestamp(),
})
assert order.is_canceled is False
order.cancel()
assert order.is_canceled is True
assert order.canceled_at == jh.now_to_timestamp()
def test_execute_order():
set_up()
order = Order({
'id': jh.generate_unique_id(),
'symbol': 'BTC-USDT',
'exchange': 'Sandbox',
'type': order_types.LIMIT,
'price': 129.33,
'qty': 10.2041,
'side': sides.BUY,
'status': order_statuses.ACTIVE,
'created_at': jh.now_to_timestamp(),
})
assert order.is_executed is False
assert order.executed_at is None
order.execute()
assert order.is_executed is True
assert order.executed_at == jh.now_to_timestamp()
def test_order_is_stop_loss_property():
single_route_backtest('TestOrderIsStopLossProperty')
def test_order_is_take_profit_property():
single_route_backtest('TestOrderIsTakeProfitProperty')
|
the-stack_0_11904 | #!/usr/bin/env python
'''
TnAmplicons
Analysis of Tn-Seq data, Transposon insertion site detection, initial
version is to process the samples (trim) primers (transoposon sequence)
detect the TA genomic insertion site and map the resulting files to the
genome.
Later version will exand on analysis
'''
import sys
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
editdist = Extension('editdist', sources=['lib/editdist.c'])
trim = Extension('trim', sources=['lib/trim.c'])
try:
    version_num = open("VERSION", "r").readline().strip()
except (IOError, OSError):
    sys.stderr.write("Error retrieving version number\n")
config = \
{
'description': 'Processing of Illumina amplicon projects - TnSeq version',
'author': 'Matt Settles',
'url': 'https://github.com/msettles/TnAmplicons',
'download_url': 'https://github.com/msettles/TnAmplicons',
'author_email': '[email protected]',
'version': version_num,
'install_requires': [],
'packages': ['TnAmplicons'],
'scripts': ['bin/TnAmplicons'],
'name': 'TnAmplicons',
"ext_package": 'TnAmplicons',
'ext_modules': [editdist, trim]
}
setup(**config)
|
the-stack_0_11905 | import numpy as np
import torch
class SamplingAlgo:
def __init__(self, t_prof, env_bldr, n_envs_avg, n_envs_br, br_buf2, avg_buf2, br_learner2, avg_learner2):
if t_prof.nn_type == "recurrent":
from PokerRL.rl.buffers.BRMemorySaverRNN import BRMemorySaverRNN
from NFSP.workers.la.action_buffer.ActionBufferRNN import AvgMemorySaverRNN
BR_MEM_SAVER = BRMemorySaverRNN
AVG_MEM_SAVER = AvgMemorySaverRNN
elif t_prof.nn_type == "feedforward":
from PokerRL.rl.buffers.BRMemorySaverFLAT import BRMemorySaverFLAT
from NFSP.workers.la.action_buffer.ActionBufferFLAT import AvgMemorySaverFLAT
BR_MEM_SAVER = BRMemorySaverFLAT
AVG_MEM_SAVER = AvgMemorySaverFLAT
else:
raise ValueError(t_prof.nn_type)
self._t_prof = t_prof
self._env_bldr = env_bldr
self._antic = self._t_prof.antic_start
self._br_buf2 = br_buf2
self._avg_buf2 = avg_buf2
self._br_learner2 = br_learner2
self._avg_learner2 = avg_learner2
self._avg_memory_savers = [
[
AVG_MEM_SAVER(env_bldr=self._env_bldr, buffer=self._avg_buf2[p])
for _ in range(n_envs_avg)
]
for p in range(self._env_bldr.N_SEATS)
]
self._br_memory_savers = [
[
BR_MEM_SAVER(env_bldr=self._env_bldr, buffer=self._br_buf2[p])
for _ in range(n_envs_br)
]
for p in range(self._env_bldr.N_SEATS)
]
@property
def antic(self):
return self._antic
@antic.setter
def antic(self, value):
self._antic = value
def play(self, nfsp_iter):
raise NotImplementedError
class SeatActorBase:
AVG = 1
BR = 2
@staticmethod
def act_mixed(owner, current_policy_tags, step_wrappers, br_learner, avg_learner, random_prob):
""" play with p*eps*rnd + p*(1-eps)*br and (1-p)*avg policy """
with torch.no_grad():
# """"""""""""""""""""""""
# Construct
# """"""""""""""""""""""""
_sw_list_AVG = []
_sw_list_BR = []
for sw in step_wrappers:
if current_policy_tags[sw.env_idx] == SeatActorBase.AVG:
_sw_list_AVG.append(sw)
elif current_policy_tags[sw.env_idx] == SeatActorBase.BR:
_sw_list_BR.append(sw)
else:
raise ValueError(current_policy_tags[sw.env_idx])
# """"""""""""""""""""""""
# AVG actions
# """"""""""""""""""""""""
SeatActorBase.act_avg(owner=owner, step_wrappers=_sw_list_AVG, avg_learner=avg_learner)
# """"""""""""""""""""""""
# BR actions
# """"""""""""""""""""""""
if random_prob > 0:
SeatActorBase.act_eps_greedy(owner=owner, step_wrappers=_sw_list_BR, br_learner=br_learner,
random_prob=random_prob)
else:
SeatActorBase.act_greedy(owner=owner, step_wrappers=_sw_list_BR, br_learner=br_learner)
@staticmethod
def act_constant_eps_greedy(owner, step_wrappers, br_learner):
""" BR + eps """
with torch.no_grad():
if len(step_wrappers) > 0:
actions, was_rnd = SeatActorBase.choose_a_br(br_learner=br_learner, owner=owner,
step_wrappers=step_wrappers, random_prob=br_learner.eps)
for i, sw in enumerate(step_wrappers):
sw.action = actions[i].item()
sw.action_was_random = was_rnd
@staticmethod
def act_eps_greedy(owner, step_wrappers, br_learner, random_prob=None):
""" BR + eps """
with torch.no_grad():
if len(step_wrappers) > 0:
actions, was_rnd = SeatActorBase.choose_a_br(br_learner=br_learner, owner=owner,
step_wrappers=step_wrappers,
random_prob=br_learner.eps if random_prob is None else random_prob)
for i, sw in enumerate(step_wrappers):
sw.action = actions[i].item()
sw.action_was_random = was_rnd
@staticmethod
def act_greedy(owner, step_wrappers, br_learner):
""" BR + eps """
with torch.no_grad():
if len(step_wrappers) > 0:
actions, was_rnd = SeatActorBase.choose_a_br(br_learner=br_learner, owner=owner,
step_wrappers=step_wrappers, random_prob=0)
for i, sw in enumerate(step_wrappers):
sw.action = actions[i].item()
sw.action_was_random = was_rnd
@staticmethod
def act_avg(owner, step_wrappers, avg_learner):
if len(step_wrappers) > 0:
a_probs = avg_learner.get_a_probs(
pub_obses=[sw.obs for sw in step_wrappers],
range_idxs=np.array([sw.range_idxs[owner] for sw in step_wrappers], dtype=np.int32),
legal_actions_lists=[sw.legal_actions_list for sw in step_wrappers],
)
_n_actions_arranged = np.arange(a_probs.shape[-1])
for i, sw in enumerate(step_wrappers):
sw.action = np.random.choice(
a=_n_actions_arranged,
p=a_probs[i],
replace=True
).item()
sw.action_was_random = False
@staticmethod
def choose_a_br(owner, br_learner, step_wrappers, random_prob):
"""
TODO maybe allow some explore some BR
Returns:
actions, was_random?:
"""
pub_obses = [sw.obs for sw in step_wrappers]
range_idxs = [sw.range_idxs[owner] for sw in step_wrappers]
legal_actions_lists = [sw.legal_actions_list for sw in step_wrappers]
# """""""""""""""""""""
# Perhaps explore
# """""""""""""""""""""
if random_prob > np.random.random():
actions = np.array([
l[np.random.randint(low=0, high=len(l))]
for l in legal_actions_lists
])
return actions, True
with torch.no_grad():
# """""""""""""""""""""
# Play by BR
# """""""""""""""""""""
actions = br_learner.select_br_a(
pub_obses=pub_obses,
range_idxs=range_idxs,
legal_actions_lists=legal_actions_lists,
)
return actions, False
@staticmethod
def pick_training_policy(br_prob):
if br_prob < np.random.random():
return SeatActorBase.AVG
return SeatActorBase.BR
|
the-stack_0_11907 | import os
import shutil
import tempfile
from datetime import datetime
from typing import Text, List, Dict
import requests
from fastapi import File
from fastapi.background import BackgroundTasks
from fastapi.security import OAuth2PasswordBearer
from loguru import logger
from mongoengine.errors import ValidationError
from rasa.shared.constants import DEFAULT_DATA_PATH
from rasa.shared.nlu.constants import TEXT
from rasa.shared.nlu.training_data import entities_parser
from rasa.shared.nlu.training_data.formats.markdown import MarkdownReader
from .constant import ALLOWED_NLU_FORMATS, ALLOWED_STORIES_FORMATS, \
ALLOWED_DOMAIN_FORMATS, ALLOWED_CONFIG_FORMATS, EVENT_STATUS, ALLOWED_RULES_FORMATS, ALLOWED_HTTP_ACTIONS_FORMATS, \
REQUIREMENTS
from .constant import RESPONSE
from .training_data_generation_processor import TrainingDataGenerationProcessor
from ...api.models import HttpActionParametersResponse, HttpActionConfigResponse
from ...exceptions import AppException
from ...shared.actions.data_objects import HttpActionConfig
from ...shared.models import StoryStepType
from ...shared.utils import Utility
class DataUtility:
"""Class contains logic for various utilities"""
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
oauth2_scheme_non_strict = OAuth2PasswordBearer(tokenUrl="/api/auth/login", auto_error=False)
markdown_reader = MarkdownReader()
@staticmethod
def prepare_nlu_text(example: Text, entities: List[Dict]):
"""
combines plain text and entities into training example format
:param example: training example plain text
:param entities: list of entities
        :return: training example combined with entities
"""
if not Utility.check_empty_string(example):
if entities:
from rasa.shared.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter
example = RasaYAMLWriter.generate_message({'text': example, "entities": entities})
return example
@staticmethod
async def save_uploaded_data(bot: Text, training_files: [File]):
if not training_files:
raise AppException("No files received!")
if training_files[0].filename.endswith('.zip'):
bot_data_home_dir = await DataUtility.save_training_files_as_zip(bot, training_files[0])
else:
bot_data_home_dir = os.path.join('training_data', bot, str(datetime.utcnow()))
data_path = os.path.join(bot_data_home_dir, DEFAULT_DATA_PATH)
Utility.make_dirs(data_path)
for file in training_files:
if file.filename in ALLOWED_NLU_FORMATS.union(ALLOWED_STORIES_FORMATS).union(ALLOWED_RULES_FORMATS):
path = os.path.join(data_path, file.filename)
Utility.write_to_file(path, await file.read())
elif file.filename in ALLOWED_CONFIG_FORMATS.union(ALLOWED_DOMAIN_FORMATS).union(
ALLOWED_HTTP_ACTIONS_FORMATS):
path = os.path.join(bot_data_home_dir, file.filename)
Utility.write_to_file(path, await file.read())
return bot_data_home_dir
@staticmethod
async def save_training_files_as_zip(bot: Text, training_file: File):
tmp_dir = tempfile.mkdtemp()
try:
zipped_file = os.path.join(tmp_dir, training_file.filename)
Utility.write_to_file(zipped_file, await training_file.read())
unzip_path = os.path.join('training_data', bot, str(datetime.utcnow()))
shutil.unpack_archive(zipped_file, unzip_path, 'zip')
return unzip_path
except Exception as e:
logger.error(e)
raise AppException("Invalid zip")
finally:
Utility.delete_directory(tmp_dir)
@staticmethod
def validate_and_get_requirements(bot_data_home_dir: Text, delete_dir_on_exception: bool = False):
"""
Checks whether at least one of the required files are present and
finds other files required for validation during import.
@param bot_data_home_dir: path where data exists
@param delete_dir_on_exception: whether directory needs to be deleted in case of exception.
"""
requirements = set()
data_path = os.path.join(bot_data_home_dir, DEFAULT_DATA_PATH)
if not os.path.exists(bot_data_home_dir):
raise AppException("Bot data home directory not found")
files_received = set(os.listdir(bot_data_home_dir))
if os.path.exists(data_path):
files_received = files_received.union(os.listdir(data_path))
if ALLOWED_NLU_FORMATS.intersection(files_received).__len__() < 1:
requirements.add('nlu')
if ALLOWED_STORIES_FORMATS.intersection(files_received).__len__() < 1:
requirements.add('stories')
if ALLOWED_DOMAIN_FORMATS.intersection(files_received).__len__() < 1:
requirements.add('domain')
if ALLOWED_CONFIG_FORMATS.intersection(files_received).__len__() < 1:
requirements.add('config')
if ALLOWED_RULES_FORMATS.intersection(files_received).__len__() < 1:
requirements.add('rules')
if ALLOWED_HTTP_ACTIONS_FORMATS.intersection(files_received).__len__() < 1:
requirements.add('http_actions')
if requirements == REQUIREMENTS:
if delete_dir_on_exception:
Utility.delete_directory(bot_data_home_dir)
raise AppException('Invalid files received')
return requirements
@staticmethod
async def save_training_files(nlu: File, domain: File, config: File, stories: File, rules: File = None,
http_action: File = None):
"""
convert mongo data to individual files
:param nlu: nlu data
:param domain: domain data
:param stories: stories data
:param config: config data
:param rules: rules data
:param http_action: http actions data
:return: files path
"""
training_file_loc = {}
tmp_dir = tempfile.mkdtemp()
data_path = os.path.join(tmp_dir, DEFAULT_DATA_PATH)
os.makedirs(data_path)
nlu_path = os.path.join(data_path, nlu.filename)
domain_path = os.path.join(tmp_dir, domain.filename)
stories_path = os.path.join(data_path, stories.filename)
config_path = os.path.join(tmp_dir, config.filename)
Utility.write_to_file(nlu_path, await nlu.read())
Utility.write_to_file(domain_path, await domain.read())
Utility.write_to_file(stories_path, await stories.read())
Utility.write_to_file(config_path, await config.read())
training_file_loc['rules'] = await DataUtility.write_rule_data(data_path, rules)
training_file_loc['http_action'] = await DataUtility.write_http_data(tmp_dir, http_action)
training_file_loc['nlu'] = nlu_path
training_file_loc['config'] = config_path
training_file_loc['stories'] = stories_path
training_file_loc['domain'] = domain_path
training_file_loc['root'] = tmp_dir
return training_file_loc
@staticmethod
async def write_rule_data(data_path: str, rules: File = None):
"""
writes the rule data to file and returns the file path
:param data_path: path of the data files
:param rules: rules data
:return: rule file path
"""
if rules and rules.filename:
rules_path = os.path.join(data_path, rules.filename)
Utility.write_to_file(rules_path, await rules.read())
return rules_path
else:
return None
@staticmethod
async def write_http_data(temp_path: str, http_action: File = None):
"""
writes the http_actions data to file and returns the file path
:param temp_path: path of the temporary directory
:param http_action: http_action data
:return: http_action file path
"""
if http_action and http_action.filename:
http_path = os.path.join(temp_path, http_action.filename)
Utility.write_to_file(http_path, await http_action.read())
return http_path
else:
return None
@staticmethod
def extract_text_and_entities(text: Text):
"""
extract entities and plain text from markdown intent example
:param text: markdown intent example
:return: plain intent, list of extracted entities
"""
example = entities_parser.parse_training_example(text)
return example.get(TEXT), example.get('entities', None)
@staticmethod
def __extract_response_button(buttons: Dict):
"""
used to prepare ResponseButton by extracting buttons configuration from bot utterance
:param buttons: button configuration in bot response
:return: yields ResponseButton
"""
from .data_objects import ResponseButton
for button in buttons:
yield ResponseButton._from_son(button)
@staticmethod
def prepare_response(value: Dict):
"""
used to prepare bot utterance either Text or Custom for saving in Mongo
:param value: utterance value
:return: response type, response object
"""
from .data_objects import ResponseText, ResponseCustom
if RESPONSE.Text.value in value:
response_text = ResponseText()
response_text.text = str(value[RESPONSE.Text.value]).strip()
if RESPONSE.IMAGE.value in value:
response_text.image = value[RESPONSE.IMAGE.value]
if RESPONSE.CHANNEL.value in value:
response_text.channel = value["channel"]
if RESPONSE.BUTTONS.value in value:
response_text.buttons = list(
DataUtility.__extract_response_button(value[RESPONSE.BUTTONS.value])
)
data = response_text
response_type = "text"
elif RESPONSE.CUSTOM.value in value:
data = ResponseCustom._from_son(
{RESPONSE.CUSTOM.value: value[RESPONSE.CUSTOM.value]}
)
response_type = "custom"
else:
response_type = None
data = None
return response_type, data
@staticmethod
def get_rasa_core_policies():
from rasa.core.policies import registry
        with open(registry.__file__, 'r') as registry_file:
            lines = registry_file.readlines()
        policy = []
        for line in lines:
if line.startswith("from"):
items = line.split("import")[1].split(",")
for item in items:
policy.append(item.strip())
return policy
@staticmethod
def build_http_response_object(http_action_config: HttpActionConfig, user: str, bot: str):
"""
Builds a new HttpActionConfigResponse object from HttpActionConfig object.
:param http_action_config: HttpActionConfig object containing configuration for the Http action
:param user: user id
:param bot: bot id
:return: HttpActionConfigResponse containing configuration for Http action
"""
http_params = [
HttpActionParametersResponse(key=param.key, value=param.value, parameter_type=param.parameter_type)
for param in
http_action_config.params_list]
response = HttpActionConfigResponse(
auth_token=http_action_config.auth_token,
action_name=http_action_config.action_name,
response=http_action_config.response,
http_url=http_action_config.http_url,
request_method=http_action_config.request_method,
params_list=http_params,
user=user,
bot=bot
)
return response
@staticmethod
def trigger_data_generation_event(bot: str, user: str, token: str):
try:
event_url = Utility.environment['data_generation']['event_url']
logger.info("Training data generator event started")
response = requests.post(event_url, headers={'content-type': 'application/json'},
json={'user': user, 'token': token})
logger.info("Training data generator event completed" + response.content.decode('utf8'))
except Exception as e:
logger.error(str(e))
TrainingDataGenerationProcessor.set_status(bot=bot,
user=user,
status=EVENT_STATUS.FAIL.value,
exception=str(e))
@staticmethod
def get_interpreter(model_path):
from rasa.model import get_model, get_model_subdirectories
from rasa.core.interpreter import create_interpreter
try:
with get_model(model_path) as unpacked_model:
_, nlu_model = get_model_subdirectories(unpacked_model)
_interpreter = create_interpreter(
nlu_model
)
except Exception:
logger.debug(f"Could not load interpreter from '{model_path}'.")
_interpreter = None
return _interpreter
@staticmethod
def train_model(background_tasks: BackgroundTasks, bot: Text, user: Text, email: Text, process_type: Text):
"""
train model common code when uploading files or training a model
:param background_tasks: fast api background task
:param bot: bot id
:param user: user id
:param email: user email for generating token for reload
:param process_type: either upload or train
"""
from ...shared.data.model_processor import ModelProcessor
from ...shared.auth import Authentication
from ...shared.data.constant import MODEL_TRAINING_STATUS
from ...train import start_training
exception = process_type != 'upload'
ModelProcessor.is_training_inprogress(bot, raise_exception=exception)
ModelProcessor.is_daily_training_limit_exceeded(bot, raise_exception=exception)
ModelProcessor.set_training_status(
bot=bot, user=user, status=MODEL_TRAINING_STATUS.INPROGRESS.value,
)
token = Authentication.create_access_token(data={"sub": email}, token_expire=180)
background_tasks.add_task(
start_training, bot, user, token.decode('utf8')
)
@staticmethod
def validate_flow_events(events, type, name):
from rasa.shared.core.constants import RULE_SNIPPET_ACTION_NAME
Utility.validate_document_list(events)
if type == "STORY" and events[0].type != "user":
raise ValidationError("First event should be an user")
if type == "RULE":
if events[0].name == RULE_SNIPPET_ACTION_NAME and events[0].type == "action":
if events[1].type != "user":
raise ValidationError('First event should be an user or conversation_start action')
else:
if events[0].type != "user":
raise ValidationError('First event should be an user or conversation_start action')
if events[len(events) - 1].type == "user":
raise ValidationError("user event should be followed by action")
intents = 0
for i, j in enumerate(range(1, len(events))):
if events[i].type == "user":
intents = intents + 1
if events[i].type == "user" and events[j].type == "user":
raise ValidationError("Found 2 consecutive user events")
if type == "RULE" and intents > 1:
raise ValidationError(
f"""Found rules '{name}' that contain more than user event.\nPlease use stories for this case""")
@staticmethod
def load_fallback_actions(bot: Text):
from .processor import MongoProcessor
mongo_processor = MongoProcessor()
config = mongo_processor.load_config(bot)
fallback_action = DataUtility.parse_fallback_action(config)
nlu_fallback_action = MongoProcessor.fetch_nlu_fallback_action(bot)
return fallback_action, nlu_fallback_action
@staticmethod
def parse_fallback_action(config: Dict):
fallback_action = "action_default_fallback"
action_fallback = next((comp for comp in config['policies'] if comp["name"] == "RulePolicy"), None)
if action_fallback:
fallback_action = action_fallback.get("core_fallback_action_name", fallback_action)
return fallback_action
@staticmethod
def load_default_actions():
from kairon.importer.validator.file_validator import DEFAULT_ACTIONS
return list(DEFAULT_ACTIONS - {"action_default_fallback", "action_two_stage_fallback"})
@staticmethod
def get_template_type(story: Dict):
steps = story['steps']
if len(steps) == 2 and steps[0]['type'] == StoryStepType.intent and steps[1]['type'] == StoryStepType.bot:
template_type = 'Q&A'
else:
template_type = 'CUSTOM'
return template_type |