id | content
---|---
478217
|
from bitmovin.utils import Serializable
class AutoRestartConfiguration(Serializable):
def __init__(self, segments_written_timeout: float = None, bytes_written_timeout: float = None,
frames_written_timeout: float = None, hls_manifests_update_timeout: float = None,
dash_manifests_update_timeout: float = None, schedule_expression: str = None):
super().__init__()
self.segmentsWrittenTimeout = segments_written_timeout
self.bytesWrittenTimeout = bytes_written_timeout
self.framesWrittenTimeout = frames_written_timeout
self.hlsManifestsUpdateTimeout = hls_manifests_update_timeout
self.dashManifestsUpdateTimeout = dash_manifests_update_timeout
self.scheduleExpression = schedule_expression
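# Illustrative sketch (timeout values are made up, not from the Bitmovin docs):
# an auto-restart configuration that triggers if no segments are written for 30 s
# or no frames for 10 s.
if __name__ == "__main__":
    config = AutoRestartConfiguration(segments_written_timeout=30.0,
                                      frames_written_timeout=10.0)
    print(config.segmentsWrittenTimeout, config.framesWrittenTimeout)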
|
478239
|
import argparse
import sys
from .__init__ import __version__
from .svafotate_main import add_annotation
from .pickle_source import add_pickle_source
from .custom_annotations import add_custom_annotation
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(
prog="svafotate",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""SVAFotate: Structural Variant Allele Frequency annotator
=================================================="""
)
parser.add_argument(
"-v",
"--version",
help="Installed version",
action="version",
version="%(prog)s " + str(__version__),
)
sub = parser.add_subparsers(title="[sub-commands]", dest="command")
sub.required = True
add_annotation(sub)
add_pickle_source(sub)
add_custom_annotation(sub)
args = parser.parse_args(args)
args.func(parser, args)
if __name__ == "__main__":
sys.exit(main() or 0)
|
478250
|
class Solution:
def removeInvalidParentheses(self, s):
r = []
self.remove(s, r, 0, 0, "()")
return r
def remove(self, s, r, li, lj, c):
cnt = 0
for i, e in enumerate(s[li:], start=li):
cnt += (1, -1, 0)[c.find(e)]
if cnt < 0:
for j, f in enumerate(s[lj : i + 1], start=lj):
if f == c[1] and (j == lj or s[j - 1] != c[1]):
self.remove(s[:j] + s[j + 1 :], r, i, j, c)
return
if c[0] == "(":
self.remove(s[::-1], r, 0, 0, ")(")
else:
r.append(s[::-1])
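# Illustrative check (input from the classic LeetCode 301 example); the order of
# the returned results depends on the search order, so compare as a set.
if __name__ == "__main__":
    assert set(Solution().removeInvalidParentheses("()())()")) == {"()()()", "(())()"}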
|
478261
|
import autolens as al
import pytest
@pytest.fixture(name="dataset_dict")
def make_dataset_dict():
return {
"name": "name",
"positions": [[1, 2]],
"positions_noise_map": [1],
"fluxes": [2],
"fluxes_noise_map": [3],
}
@pytest.fixture(name="dataset")
def make_dataset():
return al.PointDataset(
"name",
positions=al.Grid2DIrregular([(1, 2)]),
positions_noise_map=al.ValuesIrregular([1]),
fluxes=al.ValuesIrregular([2]),
fluxes_noise_map=al.ValuesIrregular([3]),
)
class TestDataset:
def test_to_dict(self, dataset_dict, dataset):
assert dataset.dict == dataset_dict
def test_from_dict(self, dataset_dict, dataset):
dataset_ = al.PointDataset.from_dict(dataset_dict)
assert (dataset_.positions == dataset.positions).all()
assert (dataset_.positions_noise_map == dataset.positions_noise_map).all()
assert (dataset_.fluxes == dataset.fluxes).all()
assert (dataset_.fluxes_noise_map == dataset.fluxes_noise_map).all()
class TestDict:
def test_dicts(self, dataset, dataset_dict):
point_dict = al.PointDict([dataset])
assert point_dict.dicts == [dataset_dict]
def test_from_dicts(self, dataset, dataset_dict):
point_dict = al.PointDict.from_dicts([dataset_dict])
assert len(point_dict) == 1
assert dataset.name in point_dict
|
478266
|
from amitools.vamos.atypes import Library, LibFlags, NodeType, ExecLibrary
from amitools.vamos.loader import SegList
from amitools.vamos.machine.regs import *
from amitools.vamos.machine.opcodes import op_jmp
class LibFuncs(object):
LVO_Open = 1
LVO_Close = 2
LVO_Expunge = 3
def __init__(self, machine, alloc):
self.machine = machine
self.mem = machine.get_mem()
self.alloc = alloc
def find_library(self, lib_name, exec_lib=None):
"""find lib by name and return base addr or 0"""
if exec_lib is None:
exec_addr = self.mem.r32(4)
exec_lib = ExecLibrary(self.mem, exec_addr)
node = exec_lib.lib_list.find_name(lib_name)
if node:
return node.get_addr()
else:
return 0
def add_library(self, lib_base, exec_lib=None):
lib = Library(self.mem, lib_base)
lib.node.type = NodeType.NT_LIBRARY
self.sum_library(lib_base)
if exec_lib is None:
exec_addr = self.mem.r32(4)
exec_lib = ExecLibrary(self.mem, exec_addr)
exec_lib.lib_list.enqueue(lib.node)
def sum_library(self, lib_base):
lib = Library(self.mem, lib_base)
lib.flags.clr_bits(LibFlags.LIBF_CHANGED | LibFlags.LIBF_SUMUSED)
return lib.update_sum()
def rem_library(self, lib_base, seg_loader, run_sp=None):
seglist = self._run_expunge(lib_base, run_sp)
if seglist != 0:
seg_loader.unload_seglist(seglist)
return seglist
def open_library(self, lib_base, run_sp=None):
return self._run_open(lib_base, run_sp)
def close_library(self, lib_base, seg_loader, run_sp=None):
seglist = self._run_close(lib_base, run_sp)
if seglist != 0:
seg_loader.unload_seglist(seglist)
return seglist
def set_function(self, lib_base, lvo, new_func_addr):
"""return old func addr or None if patch failed"""
lib = Library(self.mem, lib_base)
neg_size = lib.neg_size
if lvo < 0:
lvo = -lvo
# check lvo range
if lvo >= neg_size:
return None
# check that jmp is at lvo
addr = lib_base - lvo
jmp = self.mem.r16(addr)
if jmp != op_jmp:
return None
# set new function
old_func = self.mem.r32(addr + 2)
self.mem.w32(addr + 2, new_func_addr)
# sum lib
self.sum_library(lib_base)
return old_func
def _run_open(self, lib_base, run_sp=None):
"""call lib open and returns lib_base"""
return self._run_lvo(lib_base, self.LVO_Open, "LibOpen", run_sp)
def _run_close(self, lib_base, run_sp=None):
"""call lib close and return seg_list or 0"""
return self._run_lvo(lib_base, self.LVO_Close, "LibClose", run_sp)
def _run_expunge(self, lib_base, run_sp=None):
"""call lib expunge and return seg_list or 0"""
return self._run_lvo(lib_base, self.LVO_Expunge, "LibExpunge", run_sp)
def _run_lvo(self, lib_base, lvo, name, run_sp=None):
        # call the library vector at the given LVO: each entry is a 6-byte JMP below lib_base
func_addr = lib_base - lvo * 6
set_regs = {REG_A6: lib_base}
get_regs = [REG_D0]
# run machine and share current sp if none is given
rs = self.machine.run(
func_addr, sp=run_sp, set_regs=set_regs, get_regs=get_regs, name=name
)
return rs.regs[REG_D0]
|
478275
|
import smtplib
import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def send_notification(config, notification):
if "email_notification" not in config:
return
if config["email_notification"]["ssl"]:
smtp = smtplib.SMTP_SSL(config["email_notification"]["host"])
else:
smtp = smtplib.SMTP(config["email_notification"]["host"])
if config["email_notification"]["tls"]:
smtp.starttls()
smtp.login(config["email_notification"]["username"], config["email_notification"]["password"])
message = MIMEMultipart()
message["From"] = config["email_notification"]["from"]
message["To"] = config["email_notification"]["to"]
message["Subject"] = f"Doomsday Machine Notification {str(datetime.datetime.now())}"
message.attach(MIMEText(notification, "plain"))
smtp.sendmail(config["email_notification"]["from"], config["email_notification"]["to"], message.as_string())
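# Illustrative config shape (hypothetical values) read by send_notification above:
# config = {
#     "email_notification": {
#         "ssl": False,
#         "tls": True,
#         "host": "smtp.example.com",
#         "username": "user",
#         "password": "secret",
#         "from": "doomsday@example.com",
#         "to": "admin@example.com",
#     }
# }
# send_notification(config, "Backup completed.")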
|
478279
|
import sys, time
import socket
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QDialog
from PyQt5.QtCore import QCoreApplication
from threading import Thread
from socketserver import ThreadingMixIn
from demoServer import *
conn=None
class Window(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.textEditMessages=self.ui.textEditMessages
self.ui.pushButtonSend.clicked.connect(self.dispMessage)
self.show()
def dispMessage(self):
text=self.ui.lineEditMessage.text()
global conn
conn.send(text.encode("utf-8"))
self.ui.textEditMessages.append("Server: "+self.ui.lineEditMessage.text())
self.ui.lineEditMessage.setText("")
class ServerThread(Thread):
def __init__(self,window):
Thread.__init__(self)
self.window=window
def run(self):
TCP_IP = '0.0.0.0'
TCP_PORT = 80
BUFFER_SIZE = 1024
tcpServer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpServer.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcpServer.bind((TCP_IP, TCP_PORT))
threads = []
tcpServer.listen(4)
while True:
global conn
(conn, (ip,port)) = tcpServer.accept()
            newthread = ClientThread(ip, port, self.window)
newthread.start()
threads.append(newthread)
for t in threads:
t.join()
class ClientThread(Thread):
def __init__(self,ip,port,window):
Thread.__init__(self)
self.window=window
self.ip = ip
self.port = port
def run(self):
while True :
global conn
data = conn.recv(1024)
            self.window.textEditMessages.append("Client: "+data.decode("utf-8"))
if __name__=="__main__":
app = QApplication(sys.argv)
window = Window()
serverThread=ServerThread(window)
serverThread.start()
window.exec()
sys.exit(app.exec_())
|
478293
|
import pytest
from pydantic import ValidationError
from botx.models.buttons import Button, ButtonOptions
class CustomButton(Button):
"""Button without custom behaviour."""
def test_label_will_be_set_to_command_if_none():
assert CustomButton(command="/cmd").label == "/cmd"
def test_label_can_be_set_if_passed_explicitly():
assert CustomButton(command="/cmd", label="temp").label == "temp"
def test_empty_label():
assert CustomButton(command="/cmd", label="").label == ""
def test_create_button_options_with_invalid_hsize():
with pytest.raises(ValidationError) as exc_info:
ButtonOptions(h_size=0)
assert "should be positive integer" in str(exc_info)
|
478319
|
from datetime import datetime
LEVELS = ['trace', 'debug', 'info', 'warn', 'severe']
class Logger():
__slots__ = ['logger', 'level']
def __init__(self, logger='', level='debug'):
self.level = LEVELS.index(level)
self.logger = logger
def severe(self, text, logger='', report=None):
return self.log(text, logger, level='severe')
def warn(self, text, logger='', report=None):
return self.log(text, logger, level='warn')
def debug(self, text, logger='', report=None):
return self.log(text, logger, level='debug')
def trace(self, text, logger='', report=None):
return self.log(text, logger, level='trace')
def info(self, text, logger='', report=None):
return self.log(text, logger)
def log(self, text, logger='', level='info', report=None):
if level not in LEVELS:
raise BaseException(f'Illegal level: "{level}"! Expected one of: {LEVELS}')
if LEVELS.index(level) < self.level:
return 'ignored'
logg = f'{self.logger}.{logger}' if (self.logger and logger) else (self.logger or logger)
if not logg:
raise BaseException('Logger required!')
print(f"[{datetime.utcnow()}][{level.upper()}][{logg}] {text}", flush=True)
return 'ok'
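# Illustrative usage: messages below the configured level are ignored, everything
# else is printed with a UTC timestamp and the dotted logger name.
if __name__ == "__main__":
    log = Logger(logger='app', level='info')
    assert log.debug('not shown') == 'ignored'        # 'debug' < 'info'
    assert log.info('hello') == 'ok'                  # printed
    assert log.warn('careful', logger='sub') == 'ok'  # logged as 'app.sub'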
|
478342
|
from django import template
from landing.models import AboutUs, Data, Visualization
register = template.Library()
@register.assignment_tag
def get_aboutus_tag():
return AboutUs.objects.first()
@register.assignment_tag
def get_datalist_tag():
return Data.objects.all().order_by('-added')
@register.assignment_tag
def get_Visualization_tag():
return Visualization.objects.all().order_by('-added')
@register.assignment_tag
def get_recentVisualization_tag():
limit = 5
return Visualization.objects.order_by('-added')[:limit]
@register.assignment_tag
def get_recentDataset_tag():
limit = 5
return Data.objects.order_by('-added')[:limit]
|
478346
|
import numpy as np
def segment_list_equal(s1: np.ndarray, s2: np.ndarray) -> bool:
if len(s1) != len(s2):
return False
set1 = {frozenset(tuple(c) for c in s) for s in s1}
set2 = {frozenset(tuple(c) for c in s) for s in s2}
return set1 == set2
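# Illustrative check: segment order and endpoint order within a segment are ignored.
if __name__ == "__main__":
    a = np.array([[[0, 0], [1, 1]], [[2, 2], [3, 3]]])
    b = np.array([[[3, 3], [2, 2]], [[0, 0], [1, 1]]])
    assert segment_list_equal(a, b)
    assert not segment_list_equal(a, a[:1])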
|
478354
|
import pytest
from elliptic.Kernel.DSL import DSLBuildError, DSLMeta
class TestDSL:
def test_root(self, dsl, mocker):
mocker.patch('elliptic.Kernel.TreeBuilder.TreeBuild.build')
assert not dsl.building
assert not dsl.built
with dsl.root() as root:
assert dsl.building
assert not dsl.built
assert not dsl.building
assert dsl.built
def test_root_raises_MCIBuildError(self, dsl, mocker):
mocker.patch('elliptic.Kernel.TreeBuilder.TreeBuild.build')
with dsl.root() as root:
with pytest.raises(DSLBuildError):
with dsl.root() as root2:
pass
def test_get_built_module(self, dsl, mocker):
tree_build = mocker.patch('elliptic.Kernel.TreeBuilder.TreeBuild.build')
tree_build.return_value = mocker.sentinel.built_module
with dsl.root() as root:
pass
assert dsl.get_built_module() is mocker.sentinel.built_module
def test_get_built_module_raises_MCIBuildError(self, dsl, mocker):
mocker.patch('elliptic.Kernel.TreeBuilder.TreeBuild.build')
with pytest.raises(DSLBuildError):
dsl.get_built_module()
def test_build(self, dsl, mocker):
tree_build = mocker.patch('elliptic.Kernel.TreeBuilder.TreeBuild.build')
with dsl.root() as root:
pass
tree_build.assert_called_once()
class TestDSLMeta:
def test_dsl_meta_raises_TypeError(self):
with pytest.raises(TypeError):
DSLMeta()
|
478373
|
import random
import slacker
from slackbot import settings
from slackbot.bot import listen_to, respond_to
from slackbot.dispatcher import Message
from ..botmessage import botsend, botwebapi
from .plusplus_model import Plusplus
PLUS_MESSAGE = (
"leveled up!",
"レベルが上がりました!",
"やったね",
"(☝՞ਊ ՞)☝ウェーイ",
)
MINUS_MESSAGE = (
"leveled down.",
"レベルが下がりました",
"ドンマイ!",
"(´・ω・`)",
)
def _get_user_name(user_id: str) -> str:
"""
指定された Slack の user_id に対応する username を返す
Slacker で users.list API を呼び出す
- https://github.com/os/slacker
- https://api.slack.com/methods/users.info
"""
webapi = slacker.Slacker(settings.API_TOKEN)
response = webapi.users.info(user_id)
if response.body["ok"]:
return response.body["user"]["name"]
else:
return ""
def _update_count(message: Message, target: str, plusplus: str) -> None:
"""
指定ユーザーのカウンターを更新する
"""
target = target.lower()
    # ignore single-character targets
if len(target) < 2:
return
plus, created = Plusplus.get_or_create(name=target, defaults={"counter": 0})
if plusplus == "++":
plus.counter += 1
msg = random.choice(PLUS_MESSAGE)
else:
plus.counter -= 1
msg = random.choice(MINUS_MESSAGE)
plus.save()
botsend(message, f"{target} {msg} (通算: {plus.counter})")
@listen_to(r"^(.*):?\s*(\+\+|--)")
def multi_plusplus(message: Message, targets: str, plusplus: str) -> None:
"""
指定された複数の名前に対して ++ する
takanory terada++
takanory terada ++
takanory terada: ++
日本語++
takanory @terada++ コメント
"""
for target in targets.split():
        # convert a user_id (<@XXXXXX>) into a username
if target.startswith("<@"):
            user_id = target[2:-1]  # extract the user_id
target = _get_user_name(user_id)
        # strip a leading @ if present
if target.startswith("@"):
target = target[1:]
_update_count(message, target, plusplus)
@respond_to(r"^plusplus\s+(del|delete)\s+(\S+)")
def plusplus_delete(message: Message, subcommand: str, name: str) -> None:
"""
指定された名前を削除する
カウントが10未満のもののみ削除する
"""
try:
plus = Plusplus.get(name=name)
except Plusplus.DoesNotExist:
message.send(f"`{name}` という名前は登録されていません")
return
if abs(plus.counter) > 10:
botsend(message, f"`{name}` のカウントが多いので削除を取り消しました(count: {plus.counter})")
return
plus.delete_instance()
message.send(f"`{name}` を削除しました")
@respond_to(r"^plusplus\s+rename\s+(\S+)\s+(\S+)")
def plusplus_rename(message: Message, old: str, new: str) -> None:
"""
指定された old から new に名前を変更する
"""
try:
oldplus = Plusplus.get(name=old)
except Plusplus.DoesNotExist:
botsend(message, f"`{old}` という名前は登録されていません")
return
newplus, created = Plusplus.get_or_create(name=new, counter=oldplus.counter)
if not created:
        # the new name already exists
message.send(f"`{new}` という名前はすでに登録されています")
return
    # replace: remove the old entry (its count was copied to the new one)
oldplus.delete_instance()
botsend(message, f"`{old}` から `{new}` に名前を変更しました(count: {oldplus.counter})")
@respond_to(r"^plusplus\s+merge\s+(\S+)\s+(\S+)")
def plusplus_merge(message: Message, old: str, new: str) -> None:
"""
指定された old と new を一つにまとめる
"""
try:
oldplus = Plusplus.get(name=old)
except Plusplus.DoesNotExist:
botsend(message, f"`{old}` という名前は登録されていません")
return
try:
newplus = Plusplus.get(name=new)
except Plusplus.DoesNotExist:
botsend(message, f"`{new}` という名前は登録されていません")
return
oldcount = oldplus.counter
newcount = newplus.counter
    # combine the counts
newplus.counter += oldplus.counter
newplus.save()
oldplus.delete_instance()
botsend(
message,
(
f"`{old}` を `{new}` に統合しました"
f"(count: {oldcount} + {newcount} = {newplus.counter})"
),
)
@respond_to(r"^plusplus\s+search\s+(\S+)")
def plusplus_search(message: Message, keyword: str) -> None:
"""
指定されたキーワードを含む名前とカウントの一覧を返す
"""
pattern = f"%{keyword}%"
pluses = Plusplus.select().where(Plusplus.name ** pattern)
if len(pluses) == 0:
botsend(message, f"`{keyword}` を含む名前はありません")
else:
pretext = f"`{keyword}` を含む名前とカウントの一覧です\n"
text = ""
for plus in pluses:
text += f"- {plus.name}(count: {plus.counter})\n"
attachments = [
{
"pretext": pretext,
"text": text,
"mrkdwn_in": ["pretext", "text"],
}
]
botwebapi(message, attachments)
@respond_to(r"^plusplus\s+help+")
def plusplus_help(message: Message) -> None:
"""
ヘルプメッセージを返す
"""
botsend(
message,
"""- `名前1 名前2++`: 指定された名前に +1 カウントする
- `名前1 名前2--`: 指定された名前に -1 カウントする
- `$plusplus search (キーワード)`: 名前にキーワードを含む一覧を返す
- `$plusplus delete (名前)`: カウントを削除する(カウント10未満のみ)
- `$plusplus rename (変更前) (変更後)`: カウントする名前を変更する
- `$plusplus merge (統合元) (統合先)`: 2つの名前のカウントを統合先の名前にまとめる
""",
)
|
478395
|
from django.test import TestCase
from .test_template_tags import render_template
class BaseTest(TestCase):
"""Test the IconRenderer."""
def test_icons(self):
self.assertEqual(
render_template('{% icon "" %}'),
"<i></i>",
)
self.assertEqual(
render_template('{% icon "user" %}'),
'<i class="user"></i>',
)
self.assertEqual(
render_template('{% icon "fas fa-user fa-2x" %}'),
'<i class="fas fa-user fa-2x"></i>',
)
def test_extra_classes(self):
self.assertEqual(
render_template('{% icon "extra-triangle" %}'),
'<i class="fas fa-triangle fa-fw extra"></i>',
)
self.assertEqual(
render_template('{% icon "extra-triangle" extra_classes="and more" %}'),
'<i class="fas fa-triangle fa-fw extra and more"></i>',
)
self.assertEqual(
render_template('{% icon "extra-triangle" "and" extra_classes="more" %}'),
'<i class="fas fa-triangle fa-fw extra and more"></i>',
)
|
478451
|
import math
import pandas as pd
from causallearn.utils.ScoreUtils import *
def local_score_BIC(Data, i, PAi, parameters=None):
'''
    Calculate the *negative* local score with BIC for the linear Gaussian continuous data case
Parameters
----------
Data: ndarray, (sample, features)
i: current index
PAi: parent indexes
parameters: lambda_value, the penalty discount of bic
Returns
-------
score: local BIC score
'''
if parameters is None:
lambda_value = 1
else:
lambda_value = parameters['lambda_value']
Data = np.mat(Data)
T = Data.shape[0]
X = Data[:, i]
if len(PAi) != 0:
PA = Data[:, PAi]
D = PA.shape[1]
# derive the parameters by maximum likelihood
H = PA * pdinv(PA.T * PA) * PA.T
E = X - H * X
sigma2 = np.sum(np.power(E, 2)) / T
# BIC
score = T * np.log(sigma2) + lambda_value * D * np.log(T)
else:
sigma2 = np.sum(np.power(X, 2)) / T
# BIC
score = T * np.log(sigma2)
return score
def local_score_BDeu(Data, i, PAi, parameters=None):
'''
Calculate the *negative* local score with BDeu for the discrete case
Parameters
----------
Data: (sample, features)
i: current index
PAi: parent indexes
parameters:
sample_prior: sample prior
structure_prior: structure prior
r_i_map: number of states of the finite random variable X_{i}
Returns
-------
score: local BDeu score
'''
if parameters is None:
sample_prior = 1 # default sample_prior = 1
structure_prior = 1 # default structure_prior = 1
r_i_map = {i: len(np.unique(np.asarray(Data[:, i]))) for i in range(Data.shape[1])}
else:
sample_prior = parameters['sample_prior']
structure_prior = parameters['structure_prior']
r_i_map = parameters['r_i_map']
# calculate q_{i}
q_i = 1
for pa in PAi:
q_i *= r_i_map[pa]
if len(PAi) != 0:
# calculate N_{ij}
names = ['x{}'.format(i) for i in range(Data.shape[1])]
Data_pd = pd.DataFrame(Data, columns=names)
        parent_names = ['x{}'.format(i) for i in PAi]
        Data_pd_group_Nij = Data_pd.groupby(parent_names)
Nij_map = {key: len(Data_pd_group_Nij.indices.get(key)) for key in Data_pd_group_Nij.indices.keys()}
Nij_map_keys_list = list(Nij_map.keys())
# calculate N_{ijk}
Nijk_map = {ij: Data_pd_group_Nij.get_group(ij).groupby('x{}'.format(i)).apply(len).reset_index() for ij in
Nij_map.keys()}
for v in Nijk_map.values():
v.columns = ['x{}'.format(i), 'times']
else:
# calculate N_{ij}
names = ['x{}'.format(i) for i in range(Data.shape[1])]
Nij_map = {'': len(Data[:, i])}
Nij_map_keys_list = ['']
Data_pd = pd.DataFrame(Data, columns=names)
# calculate N_{ijk}
Nijk_map = {ij: Data_pd.groupby('x{}'.format(i)).apply(len).reset_index() for ij in Nij_map_keys_list}
for v in Nijk_map.values():
v.columns = ['x{}'.format(i), 'times']
BDeu_score = 0
# first term
vm = Data.shape[0] - 1
BDeu_score += len(PAi) * np.log(structure_prior / vm) + (vm - len(PAi)) * np.log(1 - (structure_prior / vm))
# second term
for pa in range(len(Nij_map_keys_list)):
Nij = Nij_map.get(Nij_map_keys_list[pa])
first_term = math.lgamma(sample_prior / q_i) - math.lgamma(Nij + sample_prior / q_i)
second_term = 0
Nijk_list = Nijk_map.get(Nij_map_keys_list[pa])['times'].to_numpy()
for Nijk in Nijk_list:
second_term += math.lgamma(Nijk + sample_prior / (r_i_map[i] * q_i)) - math.lgamma(
sample_prior / (r_i_map[i] * q_i))
BDeu_score += first_term + second_term
return -BDeu_score
def local_score_cv_general(Data, Xi, PAi, parameters):
'''
Calculate the local score
using negative k-fold cross-validated log likelihood as the score
based on a regression model in RKHS
Parameters
----------
Data: (sample, features)
    Xi: current index
PAi: parent indexes
parameters:
kfold: k-fold cross validation
lambda: regularization parameter
Returns
-------
score: local score
'''
Data = np.mat(Data)
PAi = list(PAi)
T = Data.shape[0]
X = Data[:, Xi]
var_lambda = parameters['lambda'] # regularization parameter
k = parameters['kfold'] # k-fold cross validation
n0 = math.floor(T / k)
gamma = 0.01
Thresh = 1E-5
if (len(PAi)):
PA = Data[:, PAi]
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 2
theta = 1 / (width ** 2)
Kx, _ = kernel(X, X, (theta, 1)) # Gaussian kernel
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kx = H0 * Kx * H0 # kernel matrix for X
# eig_Kx, eix = eigdec((Kx + Kx.T)/2, np.min([400, math.floor(T/2)]), evals_only=False) # /2
# IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
# eig_Kx = eig_Kx[IIx]
# eix = eix[:, IIx]
# mx = len(IIx)
# set the kernel for PA
Kpa = np.mat(np.ones((T, T)))
for m in range(PA.shape[1]):
G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA[:, m] * PA[:, m].T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 2
theta = 1 / (width ** 2)
Kpa = np.multiply(Kpa, kernel(PA[:, m], PA[:, m], (theta, 1))[0])
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kpa = H0 * Kpa * H0 # kernel matrix for PA
CV = 0
for kk in range(k):
if (kk == 0):
Kx_te = Kx[0:n0, 0:n0]
Kx_tr = Kx[n0: T, n0: T]
Kx_tr_te = Kx[n0: T, 0: n0]
Kpa_te = Kpa[0:n0, 0: n0]
Kpa_tr = Kpa[n0: T, n0: T]
Kpa_tr_te = Kpa[n0: T, 0: n0]
nv = n0 # sample size of validated data
if (kk == k - 1):
Kx_te = Kx[kk * n0:T, kk * n0: T]
Kx_tr = Kx[0:kk * n0, 0: kk * n0]
Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
Kpa_te = Kpa[kk * n0:T, kk * n0: T]
Kpa_tr = Kpa[0: kk * n0, 0: kk * n0]
Kpa_tr_te = Kpa[0:kk * n0, kk * n0: T]
nv = T - kk * n0
if (kk < k - 1 and kk > 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
Kpa_te = Kpa[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kpa_tr = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kpa_tr_te = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
nv = n0
n1 = T - nv
tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
tmp2 = tmp1 * Kx_tr * tmp1
tmp3 = tmp1 * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda ** 2 / gamma * tmp2) * tmp1
A = (Kx_te + Kpa_tr_te.T * tmp2 * Kpa_tr_te - 2 * Kx_tr_te.T * tmp1 * Kpa_tr_te
- n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr_te
- n1 * var_lambda ** 2 / gamma * Kpa_tr_te.T * tmp1 * Kx_tr * tmp3 * Kx_tr * tmp1 * Kpa_tr_te
+ 2 * n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr * tmp1 * Kpa_tr_te) / gamma
B = n1 * var_lambda ** 2 / gamma * tmp2 + np.mat(np.eye(n1))
L = np.linalg.cholesky(B)
C = np.sum(np.log(np.diag(L)))
# CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
CV = CV / k
else:
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 2
theta = 1 / (width ** 2)
Kx, _ = kernel(X, X, (theta, 1)) # Gaussian kernel
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kx = H0 * Kx * H0 # kernel matrix for X
# eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 2)]), evals_only=False) # /2
# IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
# mx = len(IIx)
CV = 0
for kk in range(k):
if (kk == 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[(kk + 1) * n0:T, (kk + 1) * n0: T]
Kx_tr_te = Kx[(kk + 1) * n0:T, kk * n0: (kk + 1) * n0]
nv = n0
if (kk == k - 1):
Kx_te = Kx[kk * n0: T, kk * n0: T]
Kx_tr = Kx[0: kk * n0, 0: kk * n0]
Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
nv = T - kk * n0
if (kk < k - 1 and kk > 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
nv = n0
n1 = T - nv
A = (Kx_te - 1 / (gamma * n1) * Kx_tr_te.T * pdinv(
np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr) * Kx_tr_te) / gamma
B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
L = np.linalg.cholesky(B)
C = np.sum(np.log(np.diag(L)))
# CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
CV = CV / k
score = CV # negative cross-validated likelihood
return score
def local_score_cv_multi(Data, Xi, PAi, parameters):
'''
Calculate the local score
using negative k-fold cross-validated log likelihood as the score
based on a regression model in RKHS
for variables with multi-variate dimensions
Parameters
----------
Data: (sample, features)
    Xi: current index
PAi: parent indexes
parameters:
kfold: k-fold cross validation
lambda: regularization parameter
dlabel: for variables with multi-dimensions,
indicate which dimensions belong to the i-th variable.
Returns
-------
score: local score
'''
T = Data.shape[0]
X = Data[:, parameters['dlabel'][Xi]]
var_lambda = parameters['lambda'] # regularization parameter
k = parameters['kfold'] # k-fold cross validation
n0 = math.floor(T / k)
gamma = 0.01
Thresh = 1E-5
if (len(PAi)):
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 3 ###
theta = 1 / (width ** 2 * X.shape[1]) #
Kx, _ = kernel(X, X, (theta, 1)) # Gaussian kernel
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kx = H0 * Kx * H0 # kernel matrix for X
# set the kernel for PA
Kpa = np.mat(np.ones((T, T)))
for m in range(len(PAi)):
PA = Data[:, parameters['dlabel'][PAi[m]]]
G = np.sum((np.multiply(PA, PA)), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA * PA.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 3 ###
theta = 1 / (width ** 2 * PA.shape[1])
Kpa = np.multiply(Kpa, kernel(PA, PA, (theta, 1))[0])
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kpa = H0 * Kpa * H0 # kernel matrix for PA
CV = 0
for kk in range(k):
if (kk == 0):
Kx_te = Kx[0:n0, 0:n0]
Kx_tr = Kx[n0: T, n0: T]
Kx_tr_te = Kx[n0: T, 0: n0]
Kpa_te = Kpa[0:n0, 0: n0]
Kpa_tr = Kpa[n0: T, n0: T]
Kpa_tr_te = Kpa[n0: T, 0: n0]
nv = n0 # sample size of validated data
if (kk == k - 1):
Kx_te = Kx[kk * n0:T, kk * n0: T]
Kx_tr = Kx[0:kk * n0, 0: kk * n0]
Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
Kpa_te = Kpa[kk * n0:T, kk * n0: T]
Kpa_tr = Kpa[0: kk * n0, 0: kk * n0]
Kpa_tr_te = Kpa[0:kk * n0, kk * n0: T]
nv = T - kk * n0
if (kk < k - 1 and kk > 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
Kpa_te = Kpa[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kpa_tr = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kpa_tr_te = Kpa[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
nv = n0
n1 = T - nv
tmp1 = pdinv(Kpa_tr + n1 * var_lambda * np.mat(np.eye(n1)))
tmp2 = tmp1 * Kx_tr * tmp1
tmp3 = tmp1 * pdinv(np.mat(np.eye(n1)) + n1 * var_lambda ** 2 / gamma * tmp2) * tmp1
A = (Kx_te + Kpa_tr_te.T * tmp2 * Kpa_tr_te - 2 * Kx_tr_te.T * tmp1 * Kpa_tr_te
- n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr_te
- n1 * var_lambda ** 2 / gamma * Kpa_tr_te.T * tmp1 * Kx_tr * tmp3 * Kx_tr * tmp1 * Kpa_tr_te
+ 2 * n1 * var_lambda ** 2 / gamma * Kx_tr_te.T * tmp3 * Kx_tr * tmp1 * Kpa_tr_te) / gamma
B = n1 * var_lambda ** 2 / gamma * tmp2 + np.mat(np.eye(n1))
L = np.linalg.cholesky(B)
C = np.sum(np.log(np.diag(L)))
# CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
CV = CV / k
else:
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0]) # median value
width = width * 3 ###
theta = 1 / (width ** 2 * X.shape[1]) #
Kx, _ = kernel(X, X, (theta, 1)) # Gaussian kernel
H0 = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / (T) # for centering of the data in feature space
Kx = H0 * Kx * H0 # kernel matrix for X
CV = 0
for kk in range(k):
if (kk == 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[(kk + 1) * n0:T, (kk + 1) * n0: T]
Kx_tr_te = Kx[(kk + 1) * n0:T, kk * n0: (kk + 1) * n0]
nv = n0
if (kk == k - 1):
Kx_te = Kx[kk * n0: T, kk * n0: T]
Kx_tr = Kx[0: kk * n0, 0: kk * n0]
Kx_tr_te = Kx[0:kk * n0, kk * n0: T]
nv = T - kk * n0
if (kk < k - 1 and kk > 0):
Kx_te = Kx[kk * n0: (kk + 1) * n0, kk * n0: (kk + 1) * n0]
Kx_tr = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]))]
Kx_tr_te = Kx[np.ix_(np.concatenate([np.arange(0, kk * n0), np.arange((kk + 1) * n0, T)]),
np.arange(kk * n0, (kk + 1) * n0))]
nv = n0
n1 = T - nv
A = (Kx_te - 1 / (gamma * n1) * Kx_tr_te.T * pdinv(
np.mat(np.eye(n1)) + 1 / (gamma * n1) * Kx_tr) * Kx_tr_te) / gamma
B = 1 / (gamma * n1) * Kx_tr + np.mat(np.eye(n1))
L = np.linalg.cholesky(B)
C = np.sum(np.log(np.diag(L)))
# CV = CV + (nv*nv*log(2*pi) + nv*C + nv*mx*log(gamma) + trace(A))/2;
CV = CV + (nv * nv * np.log(2 * np.pi) + nv * C + np.trace(A)) / 2
CV = CV / k
score = CV # negative cross-validated likelihood
return score
def local_score_marginal_general(Data, Xi, PAi, parameters):
'''
Calculate the local score by negative marginal likelihood
based on a regression model in RKHS
Parameters
----------
Data: (sample, features)
    Xi: current index
PAi: parent indexes
parameters: None
Returns
-------
score: local score
'''
T = Data.shape[0]
X = Data[:, Xi]
dX = X.shape[1]
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
width = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
width = width * 2.5 # kernel width
theta = 1 / (width ** 2)
H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
Kx, _ = kernel(X, X, (theta, 1))
Kx = H * Kx * H
Thresh = 1E-5
eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 4)]), evals_only=False) # /2
IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
eig_Kx = eig_Kx[IIx]
eix = eix[:, IIx]
if (len(PAi)):
PA = Data[:, PAi]
widthPA = np.mat(np.empty((PA.shape[1], 1)))
# set the kernel for PA
for m in range(PA.shape[1]):
G = np.sum((np.multiply(PA[:, m], PA[:, m])), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA[:, m] * PA[:, m].T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
widthPA[m] = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
widthPA = widthPA * 2.5 # kernel width
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
logtheta0 = np.vstack([np.log(widthPA), 0, np.log(np.sqrt(0.1))])
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
else:
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
PA = np.mat(np.zeros((T, 1)))
logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
score = nlml # negative log-likelihood
return score
def local_score_marginal_multi(Data, Xi, PAi, parameters):
'''
Calculate the local score by negative marginal likelihood
based on a regression model in RKHS
for variables with multi-variate dimensions
Parameters
----------
Data: (sample, features)
    Xi: current index
PAi: parent indexes
parameters:
dlabel: for variables with multi-dimensions,
indicate which dimensions belong to the i-th variable.
Returns
-------
score: local score
'''
T = Data.shape[0]
X = Data[:, parameters['dlabel'][Xi]]
dX = X.shape[1]
# set the kernel for X
GX = np.sum(np.multiply(X, X), axis=1)
Q = np.tile(GX, (1, T))
R = np.tile(GX.T, (T, 1))
dists = Q + R - 2 * X * X.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
widthX = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
widthX = widthX * 2.5 # kernel width
theta = 1 / (widthX ** 2)
H = np.mat(np.eye(T)) - np.mat(np.ones((T, T))) / T
Kx, _ = kernel(X, X, (theta, 1))
Kx = H * Kx * H
Thresh = 1E-5
eig_Kx, eix = eigdec((Kx + Kx.T) / 2, np.min([400, math.floor(T / 4)]), evals_only=False) # /2
IIx = np.where(eig_Kx > np.max(eig_Kx) * Thresh)[0]
eig_Kx = eig_Kx[IIx]
eix = eix[:, IIx]
if (len(PAi)):
widthPA_all = np.mat(np.empty((1, 0)))
# set the kernel for PA
PA_all = np.mat(np.empty((Data.shape[0], 0)))
for m in range(len(PAi)):
PA = Data[:, parameters['dlabel'][PAi[m]]]
PA_all = np.hstack([PA_all, PA])
G = np.sum((np.multiply(PA, PA)), axis=1)
Q = np.tile(G, (1, T))
R = np.tile(G.T, (T, 1))
dists = Q + R - 2 * PA * PA.T
dists = dists - np.tril(dists)
dists = np.reshape(dists, (T ** 2, 1))
widthPA = np.sqrt(0.5 * np.median(dists[np.where(dists > 0)], axis=1)[0, 0])
widthPA_all = np.hstack(
[widthPA_all, widthPA * np.mat(np.ones((1, np.size(parameters['dlabel'][PAi[m]]))))])
widthPA_all = widthPA_all * 2.5 # kernel width
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
logtheta0 = np.vstack([np.log(widthPA_all.T), 0, np.log(np.sqrt(0.1))])
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA_all,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA_all,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
else:
covfunc = np.asarray(['covSum', ['covSEard', 'covNoise']])
PA = np.mat(np.zeros((T, 1)))
logtheta0 = np.mat([100, 0, np.log(np.sqrt(0.1))]).T
logtheta, fvals, iter = minimize(logtheta0, 'gpr_multi_new', -300, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]))
nlml, dnlml = gpr_multi_new(logtheta, covfunc, PA,
2 * np.sqrt(T) * eix * np.diag(np.sqrt(eig_Kx)) / np.sqrt(eig_Kx[0]),
nargout=2)
score = nlml # negative log-likelihood
return score
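if __name__ == "__main__":
    # Minimal sketch (not part of causallearn): score X0 with and without its true
    # parent X1 on synthetic linear-Gaussian data using the BIC local score above.
    rng = np.random.RandomState(0)
    z = rng.normal(size=(500, 1))
    data = np.hstack([z + 0.1 * rng.normal(size=(500, 1)), z])
    print("BIC local score of X0 given {X1}:", local_score_BIC(data, 0, [1]))
    print("BIC local score of X0 given {}:  ", local_score_BIC(data, 0, []))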
|
478469
|
from .ignore import ignore # noqa
from .copy import copy # noqa
from .compile_scss import compile_scss # noqa
from .render_html import render_html_factory # noqa
|
478537
|
import typing as t
from abc import ABCMeta, abstractmethod
if t.TYPE_CHECKING:
from typing_extensions import Protocol
class SessionLike(Protocol):
@t.overload
def get(self, key):
# type: (str) -> t.Any
pass
@t.overload
def get(self, key, default): # pylint: disable=function-redefined
# type: (str, t.Any) -> t.Any
pass
def get(self, key, default=None): # pylint: disable=function-redefined
# type: (str, t.Any) -> t.Any
pass
def __setitem__(self, key, value):
# type: (str, t.Any) -> None
pass
def __contains__(self, key):
# type: (object) -> bool
pass
class Request(object):
__metaclass__ = ABCMeta
@property
def session(self):
# type: () -> SessionLike
raise NotImplementedError
@abstractmethod
def is_secure(self):
# type: () -> bool
raise NotImplementedError
@abstractmethod
def get_param(self, key):
# type: (str) -> object
raise NotImplementedError
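# Minimal sketch (not part of the original module): a dict-backed request that
# satisfies the Request interface above; a plain dict acts as the SessionLike session.
class _FakeRequest(Request):
    def __init__(self, params=None, secure=True):
        # type: (t.Optional[t.Dict[str, t.Any]], bool) -> None
        self._params = params or {}
        self._secure = secure
        self._session = {}  # type: t.Dict[str, t.Any]
    @property
    def session(self):
        # type: () -> t.Dict[str, t.Any]
        return self._session
    def is_secure(self):
        # type: () -> bool
        return self._secure
    def get_param(self, key):
        # type: (str) -> object
        return self._params.get(key)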
|
478538
|
import sys
import logging
from logging import FileHandler,StreamHandler
import os
import gzip
import subprocess
import argparse
from grsnp import worker_optimizer as worker_opt
import pdb
from celery import group
import celeryconfiguration_optimizer
from time import sleep
import atexit
# connection information for the ucsc ftp server
logger = logging.getLogger()
def create_bkg_gf_overlap_db(gf_dir,background_dir,data_dir):
""" Used to precalculate the overlapStatistics for the GFs against each of the
default backgrounds.
"""
gf_bg_stats,list_completed_gfs = {},[]
all_gfs = []
backgrounds,gfs=[],[]
db_path = os.path.join(data_dir,gf_dir,"bkg_overlaps.gr")
full_gf_dir = os.path.join(data_dir,gf_dir)
full_background_dir = os.path.join(data_dir,background_dir)
    print(db_path)
# Read in all completed GF in the partially completed bkg_overlaps file
if os.path.exists(db_path):
list_completed_gfs = [x.split("\t")[0] for x in open(db_path).read().split("\n")]
        print([x for x in list_completed_gfs if "wgEncodeHaibTfbsEcc1Tcf12V0422111PkRep2" in x])
# gather all directories (groups) in the database
dirs = [name for name in os.listdir(full_gf_dir)
if os.path.isdir(os.path.join(full_gf_dir, name))]
# Gather backgrounds paths
backgrounds = [os.path.join(background_dir, f) for f in os.listdir(full_background_dir) if f.endswith(('.gz', '.bb',".txt",".bed"))]
cur_prog,prog_max = 1,_count_gfs(full_gf_dir)
# Process each category of GFs
for d in dirs:
# Gather gfs paths
gfs = []
for base, d, files in os.walk(os.path.join(full_gf_dir,d)):
partial_base = base.replace(data_dir,"") # get the relative path of the database
gfs += [os.path.join(partial_base, f) for f
in files if f.endswith(('.gz', '.bb'))]
all_gfs += [x for x in gfs if x not in list_completed_gfs]
prog_gf = 1
# Run overlap analysis using Celery
logger.info("Running overlapStatistics for all GFs in {}".format(gf_dir))
results = group(worker_opt.calculate_bkg_gf_overlap.s(gf_path=g,list_bkg_paths=backgrounds).set(queue='optimizer.group') for g in all_gfs)()
while not results.ready():
sys.stdout.write("{} of {} completed...\r".format(results.completed_count(),len(all_gfs)))
sys.stdout.flush()
sleep(5.0)
results = results.join()
write_results(results,db_path)
def write_results(results,outputpath):
    ''' Results are written out to a temporary file which replaces the existing file after successfully
    being written.
NOTES:
Results data structure is as follows:
[{gf1_path: [{background1_overlapstatistics},{background2_overlapstatistics} ...]},
{gf2_path: [{background1_overlapstatistics},{background2_overlapstatistics} ...]} ...
]
'''
with open(outputpath,'a') as writer:
for res in results:
if isinstance(res,str):
logger.error(res)
continue
            gf = list(res.keys())[0]
stats = res[gf]
stat_line = [x["queryfile"]+":"+str(x["intersectregions"])+":"+str(x["queryregions"]) for x in stats]
stat_line = ",".join(stat_line) + "\n"
writer.write(gf+"\t"+stat_line)
def _count_gfs(grsnp_db):
x = 0
for root, dirs, files in os.walk(grsnp_db):
for f in files:
if f.endswith(".bed.gz"):
x = x+1
return x
def shutdown_workers():
# kill all existing optimizer workers
print "Stopping local workers..."
script = "ps auxww | grep -E '*grsnp_optimizerLOCAL*' | awk '{print $2}' | xargs kill -9"
out = subprocess.Popen(script,shell=True)
out.wait()
print "Removing leftover optimizer jobs from Celery queue 'optimizer.group'..."
script = "celery amqp queue.purge optimizer.group --broker " + celeryconfiguration_optimizer.CELERY_RESULT_BACKEND
out = subprocess.Popen(script,shell=True)
out.wait()
def main():
parser = argparse.ArgumentParser(prog="python -m grsnp.optimizer", description="""Pre calculates the overlapStatistics for each of the backgrounds in <db_path>/custom_data/backgrounds/<organism> and genomic features in <db_path>/grsnp_db/<organism>. Example: python -m grsnp.optimizer -d /home/username/grs_db/ -g mm9""", epilog="""Creates a file <db_path>/grsnp_db/<organism>/bkg_overlap.gr, automatically used by the server to speed up the analyses""")
parser.add_argument('--data_dir','-d', nargs="?", help="Set the directory containing the database. Required. Use absolute path. Example: /home/username/db_2.00_6.26.2014/.", required=True)
parser.add_argument('--organism','-g', nargs="?", help="The UCSC code for the organism to use. Default: hg19 (human). Data for the organism must exist in the database directory. Use dbcreator to make the database, if needed.", required=True, default="hg19")
parser.add_argument("--num_workers", "-w", type=int, help="The number of local celery workers to start. Default: 1", default=1)
args = vars(parser.parse_args())
hdlr = logging.FileHandler(os.path.join(args["data_dir"],'genomerunner_optimizer.log'))
hdlr_std = StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
hdlr_std.setFormatter(formatter)
logger.addHandler(hdlr)
logger.addHandler(hdlr_std)
logger.setLevel(logging.INFO)
atexit.register(shutdown_workers) # shutdown workers on termination
logger.info('Running optimization')
# start redis server
script = ["redis-server", "--port", str(celeryconfiguration_optimizer.redis_port)]
fh = open("redis.log","w")
out = subprocess.Popen(script,stdout=fh,stderr=fh)
for i in range(args["num_workers"]):
fh = open("worker{}.log".format(i),"w")
script = ["celery","worker", "--app", "grsnp.worker_optimizer","--loglevel", "INFO", '-Q','optimizer.group', "-n", "grsnp_optimizerLOCAL{}.%h".format(i),'--data_dir',args['data_dir']]
out = subprocess.Popen(script,stdout=fh,stderr=fh)
print "Redis backend URL: ", celeryconfiguration_optimizer.CELERY_RESULT_BACKEND
# find all the folders with GF data including those filtered by score
grdb_dirs = [os.path.join(args["data_dir"],name) for name in os.listdir(args["data_dir"])
if os.path.isdir(os.path.join(args["data_dir"], name)) and "grsnp_db" in name]
for gr_dir in grdb_dirs:
background_dir = os.path.join("custom_data","backgrounds",args["organism"])
gfs_dir = os.path.join(os.path.split(gr_dir)[1],args["organism"])
if not os.path.exists(os.path.join(args['data_dir'], gfs_dir)):
continue
if not os.path.exists(os.path.join(args['data_dir'],background_dir)):
print "ERROR: No backgrounds found in default background directory {}. Please add backgrounds.".format(background_dir)
sys.exit()
logger.info("Pre calculating statistics for GR database in")
create_bkg_gf_overlap_db(gf_dir=gfs_dir,background_dir=background_dir,data_dir=args['data_dir'])
logger.info("Finished creating optimization files.")
if __name__ == "__main__":
main()
|
478558
|
from django.db import models
SOURCE = "source"
def SINK(arg):
print(arg)
assert arg == SOURCE
def SINK_F(arg):
print(arg)
assert arg != SOURCE
# ==============================================================================
# Inheritance
#
# If base class defines a field, there can be
# 1. flow when field is assigned on subclass construction to lookup of base class
# 2. no flow from field assignment on subclass A to lookup of sibling subclass B
# 3. no flow from field assignment on base class to lookup of subclass
# ==============================================================================
# ------------------------------------------------------------------------------
# Inheritance with vanilla Django
# ------------------------------------------------------------------------------
class Book(models.Model):
title = models.CharField(max_length=256)
class PhysicalBook(Book):
physical_location = models.CharField(max_length=256)
same_name_different_value = models.CharField(max_length=256)
class EBook(Book):
download_link = models.CharField(max_length=256)
same_name_different_value = models.CharField(max_length=256)
def save_base_book():
return Book.objects.create(
title=SOURCE,
)
def fetch_book(id):
book = Book.objects.get(id=id)
try:
# This sink should have 2 sources, from `save_base_book` and
# `save_physical_book`
SINK(book.title) # $ flow="SOURCE, l:-10 -> book.title" flow="SOURCE, l:+21 -> book.title"
# The sink assertion will fail for the EBook, which we handle. The title attribute
# of a Book could be tainted, so we want this to be a sink in general.
except AssertionError:
if book.title == "safe ebook":
pass
else:
raise
assert not isinstance(book, PhysicalBook)
assert not isinstance(book, EBook)
try:
SINK_F(book.physical_location)
raise Exception("This field is not available with vanilla Django")
except AttributeError:
pass
def save_physical_book():
return PhysicalBook.objects.create(
title=SOURCE,
physical_location=SOURCE,
same_name_different_value=SOURCE,
)
def fetch_physical_book(id):
book = PhysicalBook.objects.get(id=id)
# This sink should have only 1 sources, from `save_physical_book`
SINK(book.title) # $ flow="SOURCE, l:-10 -> book.title"
SINK(book.physical_location) # $ flow="SOURCE, l:-10 -> book.physical_location"
SINK(book.same_name_different_value) # $ flow="SOURCE, l:-10 -> book.same_name_different_value"
def save_ebook():
return EBook.objects.create(
title="safe ebook",
download_link="safe",
same_name_different_value="safe",
)
def fetch_ebook(id):
book = EBook.objects.get(id=id)
SINK_F(book.title)
SINK_F(book.download_link)
SINK_F(book.same_name_different_value)
# ------------------------------------------------------------------------------
# Inheritance with `django-polymorphic`, which automatically turns lookups on the
# base class into the right subclass
#
# see https://django-polymorphic.readthedocs.io/en/stable/quickstart.html
# ------------------------------------------------------------------------------
from polymorphic.models import PolymorphicModel
class PolyBook(PolymorphicModel):
title = models.CharField(max_length=256)
class PolyPhysicalBook(PolyBook):
physical_location = models.CharField(max_length=256)
same_name_different_value = models.CharField(max_length=256)
class PolyEBook(PolyBook):
download_link = models.CharField(max_length=256)
same_name_different_value = models.CharField(max_length=256)
def poly_save_base_book():
return PolyBook.objects.create(
title=SOURCE
)
def poly_fetch_book(id, test_for_subclass=True):
book = PolyBook.objects.get(id=id)
try:
# This sink should have 2 sources, from `poly_save_base_book` and
# `poly_save_physical_book`
SINK(book.title) # $ flow="SOURCE, l:-10 -> book.title" flow="SOURCE, l:+24 -> book.title"
# The sink assertion will fail for the PolyEBook, which we handle. The title
# attribute of a PolyBook could be tainted, so we want this to be a sink in general.
except AssertionError:
if book.title == "safe ebook":
pass
else:
raise
if test_for_subclass:
assert isinstance(book, PolyPhysicalBook) or isinstance(book, PolyEBook)
if isinstance(book, PolyPhysicalBook):
SINK(book.title) # $ flow="SOURCE, l:+11 -> book.title" SPURIOUS: flow="SOURCE, l:-23 -> book.title"
SINK(book.physical_location) # $ flow="SOURCE, l:+11 -> book.physical_location"
SINK(book.same_name_different_value) # $ flow="SOURCE, l:+11 -> book.same_name_different_value"
elif isinstance(book, PolyEBook):
SINK_F(book.title) # $ SPURIOUS: flow="SOURCE, l:-27 -> book.title" flow="SOURCE, l:+7 -> book.title"
SINK_F(book.download_link)
SINK_F(book.same_name_different_value) # $ SPURIOUS: flow="SOURCE, l:+7 -> book.same_name_different_value"
def poly_save_physical_book():
return PolyPhysicalBook.objects.create(
title=SOURCE,
physical_location=SOURCE,
same_name_different_value=SOURCE,
)
def poly_fetch_physical_book(id):
book = PolyPhysicalBook.objects.get(id=id)
SINK(book.title) # $ flow="SOURCE, l:-9 -> book.title"
SINK(book.physical_location) # $ flow="SOURCE, l:-9 -> book.physical_location"
SINK(book.same_name_different_value) # $ flow="SOURCE, l:-9 -> book.same_name_different_value"
def poly_save_ebook():
return PolyEBook.objects.create(
title="safe ebook",
download_link="safe",
same_name_different_value="safe",
)
def poly_fetch_ebook(id):
book = PolyEBook.objects.get(id=id)
SINK_F(book.title)
SINK_F(book.download_link)
SINK_F(book.same_name_different_value)
|
478563
|
from setuptools import setup
def parse_requirements(filename):
lines = (line.strip() for line in open(filename))
return [line for line in lines if line and not line.startswith('#')]
setup(name='VideoGPT', version='1.0',
description='PyTorch package for VideoGPT',
url='http://github.com/wilson1yan/VideoGPT',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
packages=['videogpt'],
install_requires=parse_requirements('requirements.txt'),
zip_safe=True)
|
478564
|
from django.urls import path
from .models import site
urlpatterns = [
path('admin/', site.urls),
]
|
478580
|
from setuptools import setup, find_packages
import numpy as np
from get_version import get_version
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="Scribe",
    version=get_version(__file__),
install_requires=['cvxopt>=1.2.3', 'pandas>=0.23.0', 'numpy>=1.14', 'scipy>=1.0', 'scikit-learn>=0.19.1',
'pyccm>=0.4', 'statsmodels>=0.9.0', 'scanpy>=1.3.3', 'anndata>=0.6.18', 'loompy>=2.0.12',
'matplotlib>=2.2', 'seaborn>=0.9.0', 'networkx>=2.3', 'setuptools'],
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
include_dirs=[np.get_include()],
author="<NAME>, <NAME>, <NAME>, <NAME>",
author_email="<EMAIL>",
description='Detect causality from single cell measurements',
long_description=long_description,
long_description_content_type="text/markdown",
license='BSD',
url="https://github.com/aristoteleo/Scribe-py",
download_url=f"https://github.com/aristoteleo/Scribe-py",
keywords=["RNAseq", "singlecell", "network", "causality", "velocity"]
)
|
478699
|
import brownie
MAX_UINT256 = 2 ** 256 - 1
WEEK = 7 * 86400
def test_kick(chain, accounts, gauge_v4_1, mirrored_voting_escrow, voting_escrow, token, mock_lp_token):
alice, bob = accounts[:2]
chain.sleep(2 * WEEK + 5)
mirrored_voting_escrow.set_mirror_whitelist(accounts[0], True, {"from": accounts[0]})
mirrored_voting_escrow.mirror_lock(alice, 250, 0, 5 * 10 ** 19, chain.time() + 4 * WEEK, {"from": accounts[0]})
token.mint(alice, 5 * 10 ** 19)
token.approve(voting_escrow, MAX_UINT256, {"from": alice})
voting_escrow.create_lock(5 * 10 ** 19, chain.time() + 4 * WEEK, {"from": alice})
mock_lp_token.approve(gauge_v4_1.address, MAX_UINT256, {"from": alice})
gauge_v4_1.deposit(10 ** 21, {"from": alice})
assert gauge_v4_1.working_balances(alice) == 10 ** 21
chain.sleep(WEEK)
with brownie.reverts("dev: kick not allowed"):
gauge_v4_1.kick(alice, {"from": bob})
chain.sleep(4 * WEEK)
gauge_v4_1.kick(alice, {"from": bob})
assert gauge_v4_1.working_balances(alice) == 4 * 10 ** 20
with brownie.reverts("dev: kick not needed"):
gauge_v4_1.kick(alice, {"from": bob})
|
478709
|
from datar.base.verbs import complete_cases, proportions
import pytest
from pandas import DataFrame
from datar.base import *
from datar.tibble import tibble
from .conftest import assert_iterable_equal
def test_rowcolnames():
df = DataFrame(dict(x=[1,2,3]))
assert colnames(df) == ['x']
assert rownames(df) == [0, 1, 2]
df = DataFrame([1,2,3], index=['a', 'b', 'c'])
assert colnames(df) == [0]
assert rownames(df) == ['a', 'b', 'c']
df = colnames(df, ['y'])
assert_iterable_equal(df.columns, ['y'])
df = colnames(df, ['y'], _nested=False)
assert_iterable_equal(df.columns, ['y'])
assert_iterable_equal(colnames(df, _nested=False), ['y'])
df = rownames(df, ['a', 'b', 'c'])
assert_iterable_equal(df.index, ['a', 'b', 'c'])
df = tibble(a=tibble(x=1, y=1), b=tibble(u=2, v=3), z=2)
df = df >> colnames(['c', 'd', 'w'], _nested=True)
assert_iterable_equal(df.columns, ['c$x', 'c$y', 'd$u', 'd$v', 'w'])
def test_diag():
out = dim(3 >> diag())
assert out == (3,3)
out = dim(10 >> diag(3, 4))
assert out == (3,4)
x = c(1j,2j) >> diag()
assert x.iloc[0,0] == 0+1j
assert x.iloc[0,1] == 0+0j
assert x.iloc[1,0] == 0+0j
assert x.iloc[1,1] == 0+2j
x = TRUE >> diag(3)
assert sum(x.values.flatten()) == 3
x = c(2,1) >> diag(4)
assert_iterable_equal(x >> diag(), [2,1,2,1])
with pytest.raises(ValueError):
x >> diag(3, 3)
x = 1 >> diag(4)
assert_iterable_equal(x >> diag(3) >> diag(), [3,3,3,3])
def test_ncol():
df = tibble(x=tibble(a=1, b=2))
assert ncol(df) == 1
assert ncol(df, _nested=False) == 2
def test_t():
df = tibble(x=1, y=2)
out = t(df)
assert out.shape == (2, 1)
assert_iterable_equal(out.index, ['x', 'y'])
def test_names():
assert_iterable_equal(names(tibble(x=1)), ['x'])
assert_iterable_equal(names({'x': 1}), ['x'])
assert names({'x':1}, ['y']) == {'y': 1}
def test_setdiff():
assert_iterable_equal(setdiff(1,2), [1])
assert_iterable_equal(setdiff([1,2], [2]), [1])
def test_intersect():
assert_iterable_equal(intersect(1,2), [])
assert_iterable_equal(intersect([1,2], [2]), [2])
def test_union():
assert_iterable_equal(union(1,2), [1,2])
assert_iterable_equal(union([1,2], [2]), [1,2])
def test_setequal():
assert setequal([1,2], [2,1])
assert setequal(1, 1)
def test_duplicated():
assert_iterable_equal(
duplicated([1,1,-1,-1,2,2], incomparables=[-1]),
[False, True, False, False, False, True]
)
assert_iterable_equal(
duplicated([1,1,2,2], from_last=True),
[True, False, True, False]
)
df = tibble(x=[1,1,2,2])
assert_iterable_equal(duplicated(df), [False, True, False, True])
def test_max_col():
df = tibble(
a = [1,7,4],
b = [8,5,3],
c = [6,2,9],
d = [8,7,9]
)
assert_iterable_equal(
max_col(df[["a", "b", "c"]], "random"),
[1,0,2]
)
out = max_col(df, "random")
assert out[0] in [1,3]
assert out[1] in [0,3]
assert out[2] in [2,3]
assert_iterable_equal(
max_col(df, "first"),
[1,0,2]
)
assert_iterable_equal(
max_col(df, "last"),
[3,3,3]
)
def test_complete_cases():
df = tibble(
a = [NA, 1, 2],
b = [4, NA, 6],
c = [7, 8, 9],
)
out = complete_cases(df)
assert_iterable_equal(out, [False, False, True])
def test_append():
out = append([1], 2)
assert_iterable_equal(out, [1,2])
out = append([1,2,3], 4, base0_=True, after=None)
assert_iterable_equal(out, [4, 1,2,3])
def test_proportions():
out = proportions([1,2,3,4])
assert_iterable_equal(out, [.1,.2,.3,.4])
df = tibble(a=[1,2], b=[3,4])
proportions(df).equals(tibble(a=[.1,.2], b=[.3,.4]))
proportions(df, 1).equals(tibble(a=[1./4, 2./6], b=[3./4, 4./6]))
proportions(df, 2).equals(tibble(a=[1./3, 2./3], b=[3./7, 4./7]))
proportions(df, 3).equals(tibble(a=[1,1], b=[1,1]))
|
478740
|
from polyphony import testbench
from polyphony import pipelined
def pipe07(xs, ys):
for i in pipelined(range(len(xs))):
v = xs[i]
if v < 0:
z = (v - 8) >> 4
else:
z = (v + 8) >> 4
ys[i] = z
for i in pipelined(range(len(ys))):
v = ys[i]
#print(i, v)
if v < 0:
z = (v - 8) << 4
else:
z = (v + 8) << 4
xs[i] = z
@testbench
def test():
data = [0, 16, 32, -16, -64]
out = [0] * 5
pipe07(data, out)
assert 0 == out[0]
assert 1 == out[1]
assert 2 == out[2]
assert -2 == out[3]
assert -5 == out[4]
assert 128 == data[0]
assert 144 == data[1]
assert 160 == data[2]
assert -160 == data[3]
assert -208 == data[4]
test()
|
478742
|
class Solution:
def print2largest(self,A,N):
A.sort(reverse=True)
for i in range(1,N):
if A[i]!=A[i-1]:
return A[i]
return -1
t=int(input())
for i in range(t):
n=int(input())
a=list(map(int,input().split()))
ob=Solution()
print(ob.print2largest(a,n))
|
478746
|
from fuel.streams import AbstractDataStream
from fuel.iterator import DataIterator
import numpy as np
import theano
class IMAGENET(AbstractDataStream):
"""
A fuel DataStream for imagenet data
from fuel:
A data stream is an iterable stream of examples/minibatches. It shares
similarities with Python file handles return by the ``open`` method.
Data streams can be closed using the :meth:`close` method and reset
using :meth:`reset` (similar to ``f.seek(0)``).
"""
def __init__(self, partition_label='train', datadir='/home/jascha/data/imagenet/JPEG/', seed=12345, fraction=0.9, width=256, **kwargs):
# ignore axis labels if not given
kwargs.setdefault('axis_labels', '')
# call __init__ of the AbstractDataStream
super(self.__class__, self).__init__(**kwargs)
# get a list of the images
import glob
print "getting imagenet images"
image_files = glob.glob(datadir + "*.JPEG")
print "filenames loaded"
self.sources = ('features',)
self.width = width
# shuffle indices, subselect a fraction
np.random.seed(seed=seed)
np.random.shuffle(image_files)
num_train = int(np.round(fraction * np.float32(len(image_files))))
train_files = image_files[:num_train]
test_files = image_files[num_train:]
if 'train' in partition_label:
self.X = train_files
elif 'test' in partition_label:
self.X = test_files
self.num_examples = len(self.X)
self.current_index = 0
def get_data(self, data_state, request=None):
"""Get a new sample of data"""
if request is None:
request = [self.current_index]
self.current_index += 1
return self.load_images(request)
def apply_default_transformers(self, data_stream):
return data_stream
def open(self):
return None
def close(self):
"""Close the hdf5 file"""
pass
def reset(self):
"""Reset the current data index"""
self.current_index = 0
def get_epoch_iterator(self, **kwargs):
return super(self.__class__, self).get_epoch_iterator(**kwargs)
# return None
# TODO: implement iterator
def next_epoch(self, *args, **kwargs):
self.current_index = 0
return super(self.__class__, self).next_epoch(**kwargs)
# return None
def load_images(self, inds):
print ".",
output = np.zeros((len(inds), 3, self.width, self.width), dtype=theano.config.floatX)
for ii, idx in enumerate(inds):
output[ii] = self.load_image(idx)
return [output]
def load_image(self, idx):
filename = self.X[idx]
import Image
import ImageOps
# print "loading ", self.X[idx]
image = Image.open(self.X[idx])
width, height = image.size
if width > height:
delta2 = int((width - height)/2)
image = ImageOps.expand(image, border=(0, delta2, 0, delta2))
else:
delta2 = int((height - width)/2)
image = ImageOps.expand(image, border=(delta2, 0, delta2, 0))
image = image.resize((self.width, self.width), resample=Image.BICUBIC)
try:
imagenp = np.array(image.getdata()).reshape((self.width,self.width,3))
imagenp = imagenp.transpose((2,0,1)) # move color channels to beginning
except:
# print "reshape failure (black and white?)"
imagenp = self.load_image(np.random.randint(len(self.X)))
return imagenp.astype(theano.config.floatX)
|
478748
|
from pyramid.config import Configurator
def main(global_config, **settings):
config = Configurator(settings=settings)
config.include('ramses')
return config.make_wsgi_app()
|
478764
|
from django.apps import AppConfig
class PolymorphicTestsConfig(AppConfig):
name = 'ralph.lib.polymorphic.tests'
label = 'polymorphic_tests'
|
478787
|
from modules.OverTheShellbag import converter as cv
class TypeList:
# key "sig" will convert list type -> ConverTypeForPython()
# List : ['00', '10', '1f', '20', '2e', '2f', '31', '32', '35', '71', '74', '78', 'b1']
# '31': 1801, '35': 420, '1f': 39, '00': 13,
# '2f': 8, '2e': 7, '74': 6, '71': 4,
# 'b1': 4, '78': 3, '10': 3, '32': 2, '20': 1
SHELL_ITEM_TYPES = [
{"type" : "users_property_view_shell_item", "sig" : b"\x00"},
{"type" : "root_folder_shell_item", "sig" : b"\x1F"},
{"type" : "volume_shell_item", "sig" : (0x20, 0x2F)},
{"type" : "file_entry_shell_item", "sig" : [(0x30, 0x3F), b"\xB1"]},
{"type" : "network_location_shell_item", "sig" : (0x40, 0x4F)},
{"type" : "control_panel_shell_item", "sig" : b"\x71"},
{"type" : "control_panel_category_shell_item", "sig" : b"\x01"},
]
FILE_EXTENSION_BLOCK_TYPES = [
{"type" : "WinXP", "sig" : b"\x03"},
{"type" : "WinVista", "sig" : b"\x07"},
{"type" : "Win7", "sig" : b"\x08"},
{"type" : "Win81", "sig" : b"\x09"}
]
def GetListOnly():
class_resource = TypeList.__dict__
type_list = []
for key in class_resource:
if key.find("TYPES") != -1:
type_list.append(class_resource[key])
return type_list
def TupleInterpreter(_type_data_sig_tuple):
(start, end) = _type_data_sig_tuple
sig_list = []
for sig_data in range(start, end + 1):
hexString_sig = cv.AddPaddingHex(hex(sig_data))
bytes_sig = bytes.fromhex(hexString_sig)
sig_list.append(bytes_sig)
return sig_list
def ConvertTypeForPython():
type_list = GetListOnly()
for _type_data in type_list:
for type_data in _type_data:
if type(type_data["sig"]) is bytes:
type_data["sig"] = [type_data["sig"]]
elif type(type_data["sig"]) is list:
type_data_sig_tuple = type_data["sig"][0]
added_sig_list = type_data["sig"][1:]
sig_list = TupleInterpreter(type_data_sig_tuple)
for added_sig in added_sig_list:
sig_list.append(added_sig)
type_data["sig"] = sig_list
else:
type_data["sig"] = TupleInterpreter(type_data["sig"])
ConvertTypeForPython()
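# A minimal inspection sketch (runs only where the OverTheShellbag converter import above
# resolves). After ConvertTypeForPython() has executed at import time, every "sig" entry is
# a flat list of single-byte values, regardless of whether it started as a byte,
# a (start, end) range tuple, or a mixed list.
if __name__ == "__main__":
    for entry in TypeList.SHELL_ITEM_TYPES:
        print(entry["type"], entry["sig"][:4])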
|
478796
|
import torch
from skimage.morphology import disk
@torch.no_grad()
def binary_dilation(im: torch.Tensor, kernel: torch.Tensor):
assert len(im.shape) == 4
assert len(kernel.shape) == 2
kernel = kernel.unsqueeze(0).unsqueeze(0)
padding = kernel.shape[-1]//2
assert kernel.shape[-1] % 2 != 0
if torch.cuda.is_available():
im, kernel = im.half(), kernel.half()
else:
im, kernel = im.float(), kernel.float()
im = torch.nn.functional.conv2d(
im, kernel, groups=im.shape[1], padding=padding)
im = im.clamp(0, 1).bool()
return im
def test_dilation():
from skimage.morphology import binary_dilation as skimage_binary_dilation
from kornia.morphology import dilation as kornia_dilation
import numpy as np
import time
    im = np.random.randint(0, 2, size=(512, 512)).astype(bool)  # random binary image; randint(0, 1) would always be zero
sizes = [3, 9, 21, 91]
for s in sizes:
        kernel_np = disk(s).astype(bool)
kernel_torch = torch.from_numpy(kernel_np)
im_torch = torch.from_numpy(im)[None, None]
s = time.time()
result_skimage = skimage_binary_dilation(im, kernel_np)
print("Skimage", time.time() - s)
s = time.time()
result_kornia = kornia_dilation(im_torch.float(), kernel_torch.float()).bool().cpu().numpy().squeeze()
print("Kornia", time.time() - s)
s = time.time()
result_ours = binary_dilation(im_torch, kernel_torch).cpu().numpy().squeeze()
print("Ours", time.time() - s)
np.testing.assert_almost_equal(result_skimage, result_kornia)
np.testing.assert_almost_equal(result_skimage, result_ours)
if __name__ == "__main__":
test_dilation()
|
478798
|
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils import timezone
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from resources.models import Reservation, Resource
from respa_exchange.ews.objs import ItemID
from respa_exchange.ews.session import SoapFault
User = get_user_model()
class ExchangeConfiguration(models.Model):
"""
Encapsulates a configuration for a particular Exchange installation.
"""
name = models.CharField(
verbose_name=_('name'),
unique=True,
max_length=70,
help_text=_('a descriptive name for this Exchange configuration')
)
url = models.URLField(
verbose_name=_('EWS URL'),
help_text=_('the URL to the Exchange Web Service (e.g. https://contoso.com/EWS/Exchange.asmx)')
)
username = models.CharField(
verbose_name=_('username'),
max_length=64,
help_text=_('the service user to authenticate as, in domain\\username format')
)
password = models.CharField(
verbose_name=_('password'),
max_length=256,
help_text=_('the user\'s password (stored as plain-text)'),
)
enabled = models.BooleanField(
verbose_name=_('enabled'),
default=True,
db_index=True,
help_text=_('whether synchronization is enabled at all against this Exchange instance')
)
def __str__(self):
return self.name
class Meta:
verbose_name = _("Exchange configuration")
verbose_name_plural = _("Exchange configurations")
def get_ews_session(self):
"""
Get a configured EWS session.
:rtype: respa_exchange.ews.session.ExchangeSession
"""
if hasattr(self, '_ews_session'):
return self._ews_session
session_class = import_string(
getattr(settings, "RESPA_EXCHANGE_EWS_SESSION_CLASS", "respa_exchange.ews.session.ExchangeSession")
)
self._ews_session = session_class(
url=self.url,
username=self.username,
password=self.password,
)
return self._ews_session
class ExchangeResource(models.Model):
"""
Links a Respa resource to an Exchange calendar.
"""
exchange = models.ForeignKey(
verbose_name=_('Exchange configuration'),
to=ExchangeConfiguration,
on_delete=models.PROTECT,
related_name='resources',
)
resource = models.OneToOneField(
verbose_name=_('resource'),
to=Resource,
on_delete=models.PROTECT,
related_name='exchange_resource',
)
sync_to_respa = models.BooleanField(
verbose_name=_('sync Exchange to Respa'),
help_text=_('if disabled, events will not be synced from the Exchange calendar to Respa'),
default=True,
db_index=True
)
sync_from_respa = models.BooleanField(
verbose_name=_('sync Respa to Exchange'),
help_text=_(
'if disabled, new events will not be synced from Respa to the Exchange calendar; pre-existing events continue to be updated'),
default=True,
db_index=True
)
principal_email = models.EmailField(
verbose_name=_('principal email'),
unique=True,
help_text=_('the email address for this resource in Exchange')
)
class Meta:
verbose_name = _("Exchange resource")
verbose_name_plural = _("Exchange resources")
def __str__(self):
return "%s (%s)" % (self.principal_email, self.resource)
def clean(self):
from .downloader import sync_from_exchange
super().clean()
if self.sync_from_respa or self.sync_to_respa:
try:
sync_from_exchange(self, future_days=1, no_op=True)
except SoapFault as fault:
raise ValidationError('Exchange error: %s' % str(fault))
@property
def reservations(self):
"""
Get a queryset of ExchangeReservations for this resource
:rtype: django.db.models.QuerySet[ExchangeReservation]
"""
return ExchangeReservation.objects.filter(reservation__resource=self.resource)
class ExchangeReservation(models.Model):
"""
Links a Respa reservation with its Exchange item information.
"""
reservation = models.OneToOneField(
Reservation,
on_delete=models.DO_NOTHING, # The signal will (hopefully) deal with this
editable=False,
related_name='exchange_reservation',
)
item_id_hash = models.CharField(
# The MD5 hash of the item ID; results in shorter (=faster) DB indexes
max_length=32,
db_index=True,
editable=False
)
organizer = models.ForeignKey('ExchangeUser', editable=False, null=True,
related_name='reservations', on_delete=models.PROTECT)
exchange = models.ForeignKey( # Cached Exchange configuration
to=ExchangeConfiguration,
on_delete=models.PROTECT,
editable=False,
related_name='reservations',
)
managed_in_exchange = models.BooleanField( # Whether or not this reservation came from Exchange
db_index=True,
editable=False,
default=False
)
principal_email = models.EmailField(editable=False) # Cached resource principal email
_item_id = models.CharField(max_length=200, blank=True, editable=False, db_column='item_id')
_change_key = models.CharField(max_length=100, blank=True, editable=False, db_column='change_key')
created_at = models.DateTimeField(
verbose_name=_('time of creation'),
default=timezone.now,
editable=False
)
modified_at = models.DateTimeField(
verbose_name=_('time of modification'),
default=timezone.now,
editable=False
)
class Meta:
verbose_name = _("Exchange reservation")
verbose_name_plural = _("Exchange reservations")
def __str__(self):
return "ExchangeReservation %s for %s (%s)" % (self.pk, self.reservation, self.principal_email)
def save(self, *args, **kwargs):
if not self.exchange_id:
self.exchange = ExchangeResource.objects.get(resource=self.reservation.resource).exchange
self.clean()
return super(ExchangeReservation, self).save(*args, **kwargs)
@property
def item_id(self):
"""
Retrieve the ExchangeReservation's related appointment's item ID object
:rtype: respa_exchange.objs.ItemID
"""
return ItemID(id=self._item_id, change_key=self._change_key)
def find_organizer_user(self):
if not self.organizer_email:
return None
try:
user = User.objects.get(email=self.organizer_email)
except User.DoesNotExist:
return None
return user
@item_id.setter
def item_id(self, value):
assert isinstance(value, ItemID)
if self._item_id and self._item_id != value.id:
raise ValueError("Can't mutate a reservation's item ID!")
self._item_id = value.id
self._change_key = value.change_key
self.item_id_hash = value.hash
class ExchangeUser(models.Model):
exchange = models.ForeignKey(
verbose_name=_('Exchange configuration'),
to=ExchangeConfiguration,
on_delete=models.PROTECT,
related_name='users',
)
email_address = models.CharField(max_length=200, db_index=True)
name = models.CharField(max_length=100)
given_name = models.CharField(max_length=100, null=True, blank=True)
surname = models.CharField(max_length=100, null=True, blank=True)
user = models.OneToOneField(User, null=True, db_index=True, related_name='exchange_user',
on_delete=models.SET_NULL)
updated_at = models.DateTimeField(auto_now=True, null=True)
def __str__(self):
return self.name
class Meta:
unique_together = (('exchange', 'email_address'),)
class ExchangeUserX500Address(models.Model):
exchange = models.ForeignKey(ExchangeConfiguration, on_delete=models.CASCADE, related_name='x500_addresses')
user = models.ForeignKey(ExchangeUser, on_delete=models.CASCADE, related_name='x500_addresses')
address = models.CharField(max_length=200, null=True, blank=True, db_index=True)
def __str__(self):
return self.address
class Meta:
unique_together = (('exchange', 'address'),)
|
478800
|
class A:
def __init__(self, x):
self.x = x
def __add__(self, y):
return self.x + y
def __len__(self):
return 10
def __iter__(self):
return iter(range(10))
def __str__(self):
return "bla"
a = A(3)
assert a + 1 == 4
assert len(a) == 10
i = 0
for j in a:
assert i == j
i = i + 1
assert str(a) == "bla"
print("ok")
|
478819
|
import fault
import aetherling.helpers.fault_helpers as fault_helpers
from aetherling.space_time import *
from aetherling.space_time.reshape_st import DefineReshape_ST
import magma as m
import json
@cache_definition
def Module_0() -> DefineCircuitKind:
class _Module_0(Circuit):
name = "top"
IO = ['I', In(ST_SSeq(4, ST_SSeq(1, ST_Int(8, True))).magma_repr()),'O', Out(ST_SSeq(4, ST_SSeq(4, ST_Int(8, True))).magma_repr())] + ClockInterface(has_ce=False,has_reset=False) + valid_ports
st_in_t = [ST_SSeq(4, ST_SSeq(1, ST_Int(8, True)))]
st_out_t = ST_SSeq(4, ST_SSeq(4, ST_Int(8, True)))
binary_op = False
@classmethod
def definition(cls):
n1 = DefineFIFO(ST_SSeq(4, ST_SSeq(1, ST_Int(8, True))), 1, has_valid=True)()
wire(cls.I, n1.I)
wire(cls.valid_up, n1.valid_up)
n6 = DefineMap_S(4, DefineMap_S(1, DefineAbs_Atom(True),True),True)()
wire(n1.O, n6.I)
wire(n1.valid_down, n6.valid_up)
n9 = DefineMap_S(4, DefineUp_S(4, ST_Int(8, True), has_valid=True),True)()
wire(n6.O, n9.I)
wire(n6.valid_down, n9.valid_up)
n10 = DefineFIFO(ST_SSeq(4, ST_SSeq(4, ST_Int(8, True))), 1, has_valid=True)()
wire(n9.O, n10.I)
wire(n9.valid_down, n10.valid_up)
n11 = DefineFIFO(ST_SSeq(4, ST_SSeq(4, ST_Int(8, True))), 1, has_valid=True)()
wire(n10.O, n11.I)
wire(n10.valid_down, n11.valid_up)
n12 = DefineFIFO(ST_SSeq(4, ST_SSeq(4, ST_Int(8, True))), 1, has_valid=True)()
wire(n11.O, n12.I)
wire(n11.valid_down, n12.valid_up)
wire(n12.O, cls.O)
wire(n12.valid_down, cls.valid_down)
return _Module_0
Main = Module_0
fault_helpers.compile(Main(), "v./home/durst/dev/embeddedHaskellAetherling//test/no_bench/magma_examples/nested_map_to_nested_up/nested_map_to_nested_up_16 % 1thr.py")
|
478832
|
import numpy as np
def make_epsilon_greedy_policy(Q, epsilon, nA):
"""
Creates an epsilon-greedy policy based on a given Q-function and epsilon.
Args:
Q: A dictionary that maps from state -> action-values.
Each value is a numpy array of length nA (see below)
        epsilon: The probability to select a random action. Float between 0 and 1.
nA: Number of actions in the environment.
Returns:
A function that takes the state as an argument and returns
the probabilities for each action in the form of a numpy array of length nA.
"""
def policy_fn(state):
        # every action gets a baseline probability of epsilon / nA
probs = (epsilon / nA) * np.ones(nA)
greedy_action = Q[state].argmax()
        # the greedy action gets the remaining (1 - epsilon) mass on top of the baseline
probs[greedy_action] += 1.0 - epsilon
return probs
return policy_fn
def make_greedy_policy(Q, nA):
"""
Creates a greedy policy based on Q values.
Args:
Q: A dictionary that maps from state -> action values
nA: Number of actions in the environment.
Returns:
A function that takes an observation as input and returns a vector
of action probabilities.
"""
def policy_fn(observation):
probs = np.zeros(nA)
greedy_action = Q[observation].argmax()
probs[greedy_action] = 1.0
return probs
return policy_fn
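# A minimal usage sketch; the Q-table and state key below are illustrative only.
if __name__ == "__main__":
    from collections import defaultdict
    nA = 4
    Q = defaultdict(lambda: np.zeros(nA))
    Q["s0"] = np.array([0.1, 0.5, 0.2, 0.2])
    policy = make_epsilon_greedy_policy(Q, epsilon=0.1, nA=nA)
    probs = policy("s0")                               # [0.025, 0.925, 0.025, 0.025]
    action = np.random.choice(np.arange(nA), p=probs)  # samples action 1 most of the time
    print(probs, action)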
|
478855
|
from http import HTTPStatus
from typing import Dict, Tuple
from flask import jsonify, request, abort
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload
from db.access.files import FilesDAO
from db.schema import Matches, Files
from .blueprint import api
from .constants import ValidationErrors
from .helpers import parse_positive_int, Fields, parse_fields, parse_boolean
from ..model import Transform, database
# Optional file fields
FILE_FIELDS = Fields(Files.exif, Files.signature, Files.meta, Files.scenes)
@api.route("/files/<int:file_id>/matches", methods=["GET"])
def list_file_matches(file_id):
limit = parse_positive_int(request.args, "limit", 20)
offset = parse_positive_int(request.args, "offset", 0)
include_fields = parse_fields(request.args, "include", FILE_FIELDS)
remote = parse_boolean(request.args, "remote")
false_positive = parse_boolean(request.args, "false_positive")
file = database.session.query(Files).get(file_id)
# Handle file not found
if file is None:
abort(HTTPStatus.NOT_FOUND.value, f"File id not found: {file_id}")
query = FilesDAO.file_matches(file_id, database.session, false_positive=false_positive).options(
joinedload(Matches.match_video_file), joinedload(Matches.query_video_file)
)
if not remote:
query = query.filter(Matches.match_video_file.has(Files.contributor == None)) # noqa: E711
query = query.filter(Matches.query_video_file.has(Files.contributor == None)) # noqa: E711
# Preload file fields
query = FILE_FIELDS.preload(query, include_fields, Matches.match_video_file)
query = FILE_FIELDS.preload(query, include_fields, Matches.query_video_file)
# Get requested slice
total = query.count()
items = query.offset(offset).limit(limit).all()
include_flags = {field.key: True for field in include_fields}
mother_file = Transform.file(file, **include_flags)
mother_file["matches_count"] = FilesDAO.file_matches(file_id, database.session).count()
return jsonify(
{
"items": [Transform.file_match(item, file_id, **include_flags) for item in items],
"mother_file": mother_file,
"total": total,
"offset": offset,
}
)
def validate_update_match_dto(data: Dict) -> Tuple[str, Dict[str, str]]:
"""Validate update-match DTO.
Returns:
error message and a dict of invalid fields -> error codes.
"""
expected_fields = {"false_positive"}
actual_fields = set(data.keys())
if not actual_fields <= expected_fields:
return f"Payload can include only the following fields: {expected_fields}", {}
if "false_positive" in data:
false_positive = data["false_positive"]
if false_positive is None:
return "false_positive cannot be null", {"false_positive": ValidationErrors.INVALID_VALUE.value}
if not isinstance(false_positive, bool):
return "false_positive cannot be boolean", {"false_positive": ValidationErrors.INVALID_VALUE.value}
return None, {}
@api.route("/matches/<int:match_id>", methods=["PATCH"])
def update_match(match_id):
# Fetch match from database
match = database.session.query(Matches).filter(Matches.id == match_id).one_or_none()
# Handle match not found
if match is None:
abort(HTTPStatus.NOT_FOUND.value, f"Match id not found: {match_id}")
# Get payload
request_payload = request.get_json()
if request_payload is None:
abort(HTTPStatus.BAD_REQUEST.value, "Expected valid 'application/json' payload.")
# Validate payload
error, fields = validate_update_match_dto(request_payload)
if error is not None:
return (
jsonify({"error": error, "code": HTTPStatus.BAD_REQUEST.value, "fields": fields}),
HTTPStatus.BAD_REQUEST.value,
)
match.false_positive = request_payload.get("false_positive", match.false_positive)
try:
database.session.commit()
except IntegrityError:
abort(HTTPStatus.BAD_REQUEST.value, "Data integrity violation.")
return jsonify(Transform.match(match))
@api.route("/files/<int:file_id>/matches/<int:match_id>", methods=["PATCH"])
def update_file_match(file_id, match_id):
include_fields = parse_fields(request.args, "include", FILE_FIELDS)
file = database.session.query(Files).filter(Files.id == file_id).one_or_none()
# Handle file not found
if file is None:
abort(HTTPStatus.NOT_FOUND.value, f"File id not found: {file_id}")
# Fetch match from database
match = database.session.query(Matches).filter(Matches.id == match_id).one_or_none()
# Handle match not found
if match is None:
abort(HTTPStatus.NOT_FOUND.value, f"Match id not found: {match_id}")
# Get payload
request_payload = request.get_json()
if request_payload is None:
abort(HTTPStatus.BAD_REQUEST.value, "Expected valid 'application/json' payload.")
# Validate payload
error, fields = validate_update_match_dto(request_payload)
if error is not None:
return (
jsonify({"error": error, "code": HTTPStatus.BAD_REQUEST.value, "fields": fields}),
HTTPStatus.BAD_REQUEST.value,
)
match.false_positive = request_payload.get("false_positive", match.false_positive)
try:
database.session.commit()
except IntegrityError:
abort(HTTPStatus.BAD_REQUEST.value, "Data integrity violation.")
include_flags = {field.key: True for field in include_fields}
return jsonify(Transform.file_match(match, file_id, **include_flags))
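# A small sanity-check sketch for validate_update_match_dto (payloads are illustrative; this
# only runs where the module's Flask/DB imports resolve). The validator returns (None, {})
# on success, otherwise an error message plus a field -> error-code mapping.
if __name__ == "__main__":
    print(validate_update_match_dto({"false_positive": True}))   # (None, {})
    print(validate_update_match_dto({"false_positive": "yes"}))  # type error on false_positive
    print(validate_update_match_dto({"unexpected_field": 1}))    # unexpected-field error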
|
478881
|
from django.db import models
class Ambiguous(models.Model):
name = models.CharField(max_length=20)
|
478992
|
import re
import random
import sys
import traceback
import ast
import operator
import time
import json
from simpleeval import SimpleEval, DEFAULT_OPERATORS, DEFAULT_FUNCTIONS, DEFAULT_NAMES
from . import euphutils
from . import logger
log = logger.Logger()
# functions and operators that can be used in ${math} syntax
EVAL_FUNCTIONS = DEFAULT_FUNCTIONS.copy()
EVAL_FUNCTIONS.update({'bool': bool, 'repr': repr, 'to_json': lambda x: json.dumps(x), 'from_json': lambda x: json.loads(x), 'len': len, 'mention': euphutils.mention, 'unwhitespace': lambda x: re.sub(r'\s', '_', x), 'time': time.time, 'eval': None})
EVAL_OPERATORS = DEFAULT_OPERATORS.copy()
EVAL_OPERATORS.update({ast.Not: operator.not_, ast.In: lambda a, b: a in b, ast.NotIn: lambda a, b: a not in b, ast.Is: operator.is_, ast.IsNot: operator.is_not})
# init pseudo-regex; i.e. if a bot has this as the regex string, its response
# will be triggered on the bot's initialization
INIT_TRIGGER = '(+init)'
class Parser:
def __init__(self, parse_string, dbginfo=None):
temp = ''
self.array = []
self.parse_string = parse_string
self.dbginfo = dbginfo or 'N/A'
self.variables = {}
regex_mode = True
i = 0
while i < len(parse_string) and re.match(r'\s', parse_string[i]):
i += 1
while i < len(parse_string):
if regex_mode:
arrow_match = re.match(r'\s*->\s*', parse_string[i:])
if arrow_match:
i += len(arrow_match.group(0))
regex_mode = False
if i >= len(parse_string):
self.array.append([temp, [0, '']])
else:
self.array.append([temp])
temp = ''
else:
temp += parse_string[i]
i += 1
else:
temp = self.parse_response_string(parse_string[i:])
self.array[-1].append(temp[0])
i += len(temp[1])
temp = ''
                whitespace_match = re.match(r'\s*', parse_string[i:])
if whitespace_match:
i += len(whitespace_match.group(0))
regex_mode = True
def load_array(self, array):
self.array = array
def get_init_messages(self):
for raw_regex_string, response_data in self.array:
if raw_regex_string.lower() == INIT_TRIGGER:
messages = self.parse_entry(response_data)
# just in case someone is relying on groups[0] existing
self.variables['groups'] = ['']
yield from messages
def get_messages(self, content, sender):
for raw_regex_string, response_data in self.array:
if raw_regex_string.lower() == INIT_TRIGGER:
continue
regex_string = ''
i = 0
while i < len(raw_regex_string):
if raw_regex_string[i:].startswith('${'):
i += 2
# parse the variable name as if it was part of a response
parsed = self.parse_response_string(raw_regex_string[i:], 3)
i += len(parsed[1])
variable = next(self.parse_entry(parsed[0]), '')
regex_string += re.escape(variable)
else:
regex_string += raw_regex_string[i]
i += 1
try:
regex = re.compile(regex_string, re.IGNORECASE)
except re.error as e:
log.write('Bad regular expression {!r} ({!s}), ignoring. ({!s})'.format(regex_string, e, self.dbginfo))
continue
match = regex.search(content)
if match:
messages = self.parse_entry(response_data)
self.variables['groups'] = list(match.groups())
self.variables['groups'].insert(0, match.group(0))
groups = tuple(reversed(match.groups('')))
groups = tuple(zip(map('\\{0}'.format, range(len(groups), 0, -1)), groups))
for message in messages:
for backreference, group in groups:
message = message.replace(backreference, group)
yield message
def get_regexes(self):
return list(map(lambda entry: entry[0], self.array))
def parse_entry(self, parsed_data):
if parsed_data[0] == 0: # concatenation
if len(parsed_data) == 1:
return
element = parsed_data[1]
remainder = parsed_data[2:]
remainder.insert(0, 0)
if type(element) is str:
result = None
for result in self.parse_entry(remainder):
yield element + result
if result is None:
yield element
else:
element_result = None
for element_result in self.parse_entry(element):
remainder_result = None
for remainder_result in self.parse_entry(remainder):
yield element_result + remainder_result
if remainder_result is None:
yield element_result
if element_result is None:
for remainder_result in self.parse_entry(remainder):
yield remainder_result
elif parsed_data[0] == 1: # random choice [a,b,c]
element = parsed_data[random.randint(1, len(parsed_data) - 1)]
if type(element) is not str:
for result in self.parse_entry(element):
yield result
else:
yield element
elif parsed_data[0] == 2: # multiple response {a,b,c}
for element in parsed_data[1:]:
if type(element) is str:
yield element
else:
for result in self.parse_entry(element):
yield result
elif parsed_data[0] == 3: # dynamic variable ${variable}
variable_name = parsed_data[1]
if type(variable_name) is not str:
variable_name = next(self.parse_entry(variable_name), '')
evaluator = SimpleEval(names=self.variables.copy(), functions=EVAL_FUNCTIONS, operators=EVAL_OPERATORS)
evaluator.names['variables'] = evaluator.names
evaluator.functions['eval'] = evaluator.eval
try:
yield str(evaluator.eval(variable_name))
except GeneratorExit:
pass
except Exception:
yield '[Error: {0}]'.format(''.join(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1])).strip())
def parse_response_string(self, data, datatype=0):
parsed = [datatype]
# possible expression types:
# 0 = concatenation
# 1 = random choice [a,b,c]
# 2 = multiple response {a,b,c}
# 3 = dynamic variable ${variable}
start = {'[': 1, '{': 2, '${': 3}
end = {0: ';', 1: ']', 2: '}', 3: '}'}
startchars = tuple(start.keys())
endchar = end[datatype]
i = 0
separate = True
separatable = {0: False, 1: True, 2: True, 3: False}[datatype]
allow_nesting = {0: True, 1: True, 2: True, 3: False}[datatype]
while i < len(data):
if separate and re.match(r'\s', data[i]):
i += 1
continue
elif data[i] == endchar:
if separate:
parsed.append('')
return (parsed, data[:i + 1])
elif allow_nesting and data[i:].startswith(startchars):
startchar = next(char for char in startchars if data[i:].startswith(char))
expression_type = start[startchar]
i += len(startchar)
subparsed = self.parse_response_string(data[i:], expression_type)
i += len(subparsed[1])
if separate or parsed[0] == 0:
parsed.append(subparsed[0])
separate = False
else:
if type(parsed[-1]) is list and parsed[-1][0] == 0:
parsed[-1].append(subparsed[0])
else:
parsed[-1] = [0, parsed[-1], subparsed[0]]
continue
elif separatable and data[i] == ',':
if separate:
parsed.append('')
separate = True
i += 1
continue
elif data[i] == '\\':
i += 1
if re.match(r'\d', data[i]):
# This backslash is for a backreference. Insert the backslash literally.
if type(parsed[-1]) is str and not separate:
parsed[-1] += '\\'
elif parsed[0] != 0 and not separate:
if type(parsed[-1]) is list and parsed[-1][0] == 0:
if type(parsed[-1][-1]) is str:
parsed[-1][-1] += '\\'
else:
parsed[-1].append('\\')
else:
parsed[-1] = [0, parsed[-1], '\\']
else:
parsed.append('\\')
separate = False
# Insert the character after the backslash literally.
if type(parsed[-1]) is str and not separate:
parsed[-1] += data[i]
elif parsed[0] != 0 and not separate:
if type(parsed[-1]) is list and parsed[-1][0] == 0:
if type(parsed[-1][-1]) is str:
parsed[-1][-1] += data[i]
else:
parsed[-1].append(data[i])
else:
parsed[-1] = [0, parsed[-1], data[i]]
else:
parsed.append(data[i])
separate = False
i += 1
continue
else:
if type(parsed[-1]) is str and not separate:
parsed[-1] += data[i]
elif parsed[0] != 0 and not separate:
if type(parsed[-1]) is list and parsed[-1][0] == 0:
if type(parsed[-1][-1]) is str:
parsed[-1][-1] += data[i]
else:
parsed[-1].append(data[i])
else:
parsed[-1] = [0, parsed[-1], data[i]]
else:
parsed.append(data[i])
separate = False
i += 1
if separate:
parsed.append('')
return (parsed, data)
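# A minimal usage sketch (assumes this module is imported inside its package, since it relies
# on the relative euphutils/logger imports). The rule string is illustrative: a trigger regex,
# '->', then a response containing a random-choice group in brackets.
if __name__ == '__main__':
    demo = Parser('hello -> [Hi,Hey] there!')
    for message in demo.get_messages('hello world', sender='someone'):
        print(message)  # prints either 'Hi there!' or 'Hey there!'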
|
479038
|
import threading
import boto3
class Boto3Profile(object):
"""Store parameters for creating a boto3 profile"""
session_cache = dict()
lock = threading.Lock()
def __init__(self, profile_name, region_name):
self.profile_name = profile_name
self.region_name = region_name
def update(self, other):
"""Update profile with value from another one"""
assert isinstance(other, Boto3Profile)
if other.profile_name:
self.profile_name = other.profile_name
if other.region_name:
self.region_name = other.region_name
def get_boto3_session(self):
session_key = self.region_name, self.profile_name
# cache the session object as the majority of stacks
# are staying in one region
with self.lock:
if session_key not in self.session_cache:
self.session_cache[session_key] = boto3.Session(
profile_name=self.profile_name,
region_name=self.region_name,
)
return self.session_cache[session_key]
def __repr__(self):
return '{}({}/{})'.format(self.__class__.__name__, self.profile_name,
self.region_name)
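# A minimal usage sketch (profile and region names are placeholders); update() only
# overwrites the fields that are actually set on the other profile.
if __name__ == '__main__':
    base = Boto3Profile(profile_name='default', region_name='us-east-1')
    override = Boto3Profile(profile_name='', region_name='eu-west-1')
    base.update(override)
    print(base)  # Boto3Profile(default/eu-west-1)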
|
479070
|
from .nodes import *
class Type:
def __str__(self):
return '%s(size=%d)' % (self.__class__.__name__, self.size)
def __repr__(self):
return self.__str__()
@property
def raw(self):
return self
class IntType(Type):
def __init__(self):
self.size = 1
self.suffix = ''
class PartialIntType(IntType):
def __init__(self, suffix, size_bytes):
super().__init__()
self.suffix = suffix
self.s_bytes = size_bytes
def __str__(self):
return super().__str__() + '[%s]' % self.suffix
class Pointer(Type):
def __init__(self, type):
self.size = 1
self.type = type
def __str__(self):
return 'Pointer(%s)' % self.type
class DecoratedType(Type):
def __init__(self, type, static, const):
self.size = type.size
self.type = type
self.static = static
self.const = const
@property
def raw(self):
return self.type
def __str__(self):
s = self.type.__str__()
s += '[static=%s,const=%s]' % (self.static, self.const)
return s
class ArrayType(Type):
def __init__(self, type, size):
self.type = type
self.arr_size = size
self.size = type.size * size
def __str__(self):
return '%d*[%s]' % (self.arr_size, self.type)
class FunctionType(Type):
def __init__(self, ret_type, param_types, is_varargs):
self.size = 0
self.type = ret_type
self.param_types = param_types
self.is_varargs = is_varargs
def __str__(self):
param_types = list(map(str, self.param_types))
if self.is_varargs:
param_types.append('...')
return '%s(%s)' % (self.type.__str__(), ', '.join(param_types))
class VoidType(Type):
def __init__(self):
self.size = 0
class StructType(Type):
def __init__(self, members):
self.members = members
self.name_to_offset = {}
self.name_to_type = {}
self.name_to_index = {}
self.index_to_name = {}
offset = 0
idx = 0
for member in members:
type, name = member
assert name not in self.name_to_type
self.name_to_offset[name] = offset
self.name_to_type[name] = type
self.index_to_name[idx] = name
self.name_to_index[name] = idx
offset += type.size
idx += 1
self.size = offset
def __str__(self):
s = super().__str__()
s += ' {\n'
for type, name in self.members:
s += ' %s: %s\n' % (name, type)
s += '}'
return s
class Types:
def __init__(self):
self.types = {
'int': IntType(),
'char': PartialIntType('B', 1),
'short': PartialIntType('W', 2),
'void': VoidType()
}
self.structs = {}
def add_type(self, name, type_):
assert name not in self.types
assert isinstance(type_, Type)
self.types[name] = type_
def from_spec(self, type_spec):
type = self.major(type_spec.type)
is_static = type_spec.store == Keyword.STATIC
is_const = type_spec.qual == Keyword.CONST
if not is_static and not is_const:
return type
return DecoratedType(type, is_static, is_const)
def major(self, type):
if isinstance(type, IdentifierExpr):
return self.types[type.val]
elif isinstance(type, StructTypeRef):
return self.structs[type.name.val]
elif isinstance(type, StructSpec):
return self.define_struct(type)
else:
assert False, "Unknown type %s" % type.__class__.__name__
def define_struct(self, spec):
struct_name = spec.name.val if spec.name is not None else None
assert struct_name is None or struct_name not in self.structs
assert len(spec.decl)
members = []
for member in spec.decl:
major = self.major(member.spec)
assert len(member.decl), "Members must have a name"
for decl in member.decl:
is_array = isinstance(decl.name_spec, ArrayDeclSpec)
array_size = None
if is_array:
assert isinstance(decl.name_spec.dim, IntLiteral)
array_size = decl.name_spec.dim.val
type = self.effective(major, decl.pointer_depth, is_array, False, array_size)
name = self.get_name_for(decl.name_spec)
members.append((type, name))
struct = StructType(members)
if struct_name is not None:
self.structs[struct_name] = struct
return struct
def get_name_for(self, spec):
if isinstance(spec, ArrayDeclSpec):
return spec.name.val
elif isinstance(spec, FuncDeclSpec):
return spec.name.val
else:
return spec.val
def effective(self, type, ptr, is_array, is_function, array_size=None,
func_params=None, is_varargs=False):
for _ in range(ptr):
type = Pointer(type)
if is_array:
if array_size is None:
type = Pointer(type) # TODO index offset
else:
type = ArrayType(type, array_size)
if is_function:
type = FunctionType(type, tuple(func_params), is_varargs)
return type
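# A minimal sketch of how StructType accumulates member offsets (member types chosen for
# illustration): two plain ints followed by a char give offsets 0, 1, 2 and total size 3.
if __name__ == '__main__':
    s = StructType([(IntType(), 'x'), (IntType(), 'y'), (PartialIntType('B', 1), 'flag')])
    print(s.name_to_offset)  # {'x': 0, 'y': 1, 'flag': 2}
    print(s.size)            # 3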
|
479082
|
import logging
import random
from datetime import timedelta
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ImproperlyConfigured
from django.db import models, transaction
from django.utils import timezone
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
from resources.models import Resource
User = get_user_model()
logger = logging.getLogger(__name__)
DRIVERS = (
('sipass', 'Siemens SiPass', 'kulkunen.drivers.sipass.SiPassDriver'),
)
driver_classes = {}
class AccessControlUserQuerySet(models.QuerySet):
def active(self):
m = self.model
return self.filter(state=m.INSTALLED)
class AccessControlUser(models.Model):
INSTALLED = 'installed'
REMOVED = 'removed'
STATE_CHOICES = (
(INSTALLED, _('installed')),
(REMOVED, _('removed')),
)
system = models.ForeignKey('AccessControlSystem', related_name='users', on_delete=models.CASCADE)
user = models.ForeignKey(
User, related_name='access_control_users', on_delete=models.SET_NULL, null=True, blank=True
)
state = models.CharField(
max_length=20, choices=STATE_CHOICES, default=INSTALLED, editable=False,
help_text=_('State of the user')
)
first_name = models.CharField(max_length=100, null=True, blank=True)
last_name = models.CharField(max_length=100, null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
removed_at = models.DateTimeField(null=True, blank=True)
identifier = models.CharField(
max_length=100, null=True, blank=True, verbose_name=_('identifier'),
help_text=_('Identifier of user in the access control system (if any)')
)
driver_data = JSONField(null=True, blank=True)
objects = AccessControlUserQuerySet.as_manager()
class Meta:
index_together = (('system', 'state'),)
def __str__(self) -> str:
name = ' '.join([x for x in (self.first_name, self.last_name) if x])
        user_uuid = str(self.user.uuid) if self.user and self.user.uuid else _("[No identifier]")
if name:
return _("{uuid}: {name}").format(uuid=user_uuid, name=name)
else:
return _("{uuid}").format(uuid=user_uuid)
class AccessControlGrantQuerySet(models.QuerySet):
def active(self):
m = self.model
return self.filter(state__in=(m.REQUESTED, m.INSTALLING, m.INSTALLED, m.REMOVING))
class AccessControlGrant(models.Model):
REQUESTED = 'requested'
INSTALLING = 'installing'
INSTALLED = 'installed'
CANCELLED = 'cancelled'
REMOVING = 'removing'
REMOVED = 'removed'
STATE_CHOICES = (
(REQUESTED, _('requested')),
(INSTALLED, _('installed')),
(CANCELLED, _('cancelled')),
(REMOVING, _('removing')),
(REMOVED, _('removed')),
)
user = models.ForeignKey(
AccessControlUser, related_name='grants', null=True, blank=True, on_delete=models.SET_NULL
)
resource = models.ForeignKey('AccessControlResource', related_name='grants', on_delete=models.CASCADE)
# If a Respa reservation is deleted, it will be marked as None here.
# AccessControlReservation with reservation == None should be deleted.
reservation = models.ForeignKey(
'resources.Reservation', on_delete=models.SET_NULL, null=True, related_name='access_control_grants'
)
starts_at = models.DateTimeField(null=True, blank=True)
ends_at = models.DateTimeField(null=True, blank=True)
# These are set if the grants are to be installed and removed at a later time.
install_at = models.DateTimeField(null=True, blank=True)
remove_at = models.DateTimeField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
removed_at = models.DateTimeField(auto_now_add=True)
access_code = models.CharField(verbose_name=_('access code'), max_length=32, null=True, blank=True)
state = models.CharField(
max_length=20, choices=STATE_CHOICES, default=REQUESTED, editable=False,
help_text=_('State of the grant')
)
identifier = models.CharField(
max_length=100, null=True, blank=True, verbose_name=_('identifier'),
help_text=_('Identifier of grant in the access control system (if any)')
)
installation_failures = models.PositiveIntegerField(
default=0, editable=False, help_text=_('How many times the system has tried to install this grant and failed')
)
removal_failures = models.PositiveIntegerField(
default=0, editable=False, help_text=_('How many times the system has tried to remove this grant (and failed)')
)
driver_data = JSONField(null=True, blank=True)
objects = AccessControlGrantQuerySet.as_manager()
class Meta:
index_together = (('resource', 'state'),)
def __str__(self) -> str:
return _("{user} {reservation} ({state})").format(
user=self.user, reservation=self.reservation, state=self.state
)
def cancel(self):
"""Cancels a grant.
The method checks if the grant was installed into the access control system
        already and asks for its revocation if it was. Otherwise the grant is just marked
as removed.
"""
logger.info('[%s] Canceling' % self)
if self.state not in (self.REQUESTED, self.INSTALLED):
            logger.warning('[%s] Cancel called in invalid state' % self)
return
if self.state == self.REQUESTED:
self.state = self.REMOVED
else:
self.state = self.CANCELLED
self.save(update_fields=['state'])
if self.state == self.CANCELLED:
self.resource.system.prepare_remove_grant(self)
def install(self):
"""Installs the grant to the remote access control system.
"""
logger.info('[%s] Installing' % self)
assert self.state == self.REQUESTED
# Sanity check to make sure we don't try to install grants
# for past reservations.
if self.ends_at < timezone.now():
            logger.error('[%s] Attempted to install grant for a past reservation' % self)
self.cancel()
return
with transaction.atomic():
# Set the state while locking the resource to protect against race
# conditions.
db_self = AccessControlGrant.objects.select_related('resource').select_for_update().get(id=self.id)
if db_self.state != self.REQUESTED:
logger.error('[%s] Race condition with grant' % self)
return
self.state = self.INSTALLING
# After the state is set to 'installing', we have exclusive access.
self.save(update_fields=['state'])
try:
self.resource.system.install_grant(self)
except Exception as e:
logger.exception('[%s] Failed to grant access' % self)
# If we fail, we retry after a while
self.installation_failures += 1
self.state = self.REQUESTED
min_delay = min(1 << self.installation_failures, 30 * 60)
retry_delay = random.randint(min_delay, 2 * min_delay)
self.install_at = timezone.now() + timedelta(seconds=retry_delay)
self.save(update_fields=['state', 'installation_failures', 'install_at'])
logger.info('[%s] Retrying after %d seconds' % (self, retry_delay))
def remove(self):
"""Removes the grant from the remote access control system.
"""
logger.info('[%s] Removing' % self)
assert self.state in (self.INSTALLED, self.CANCELLED)
old_state = self.state
with transaction.atomic():
db_self = AccessControlGrant.objects.select_related('resource').select_for_update().get(id=self.id)
if db_self.state != old_state:
logger.error('[%s] Race condition with grant' % self)
return
self.state = self.REMOVING
self.save(update_fields=['state'])
try:
self.resource.system.remove_grant(self)
except Exception as e:
logger.exception('[%s] Failed to revoke access' % self)
# If we fail, we retry after a while
self.removal_failures += 1
self.state = old_state
min_delay = min(1 << self.removal_failures, 30 * 60)
retry_delay = random.randint(min_delay, 2 * min_delay)
self.remove_at = timezone.now() + timedelta(seconds=retry_delay)
self.save(update_fields=['state', 'removal_failures', 'remove_at'])
logger.info('[%s] Retrying after %d seconds' % (self, retry_delay))
def notify_access_code(self):
reservation = self.reservation
reservation.access_code = self.access_code
reservation.save(update_fields=['access_code'])
logger.info('Notifying access code creation')
reservation.send_access_code_created_mail()
class AccessControlResource(models.Model):
system = models.ForeignKey(
'AccessControlSystem', related_name='resources', on_delete=models.CASCADE,
verbose_name=_('system')
)
# If a Respa resource is deleted, it will be marked as None here.
# AccessControlResources with resource == None should be deleted.
resource = models.ForeignKey(
'resources.Resource', related_name='access_control_resources', on_delete=models.SET_NULL,
verbose_name=_('resource'), null=True
)
identifier = models.CharField(
max_length=100, null=True, blank=True, verbose_name=_('identifier'),
help_text=_('Identifier of resource in the access control system (if any)')
)
driver_config = JSONField(null=True, blank=True, help_text=_('Driver-specific configuration'))
driver_data = JSONField(null=True, editable=False, help_text=_('Internal driver data'))
class Meta:
unique_together = (('system', 'resource'),)
def __str__(self) -> str:
return "%s: %s" % (self.system, self.resource)
def save(self, *args, **kwargs):
self.system.save_resource(self)
super().save(*args, **kwargs)
def pad_start_and_end_times(self, start, end):
system = self.system
leeway = system.reservation_leeway or 0
start -= timedelta(minutes=leeway)
end += timedelta(minutes=leeway)
return start, end
def _grant_has_changed(self, old_grant, new_grant):
if old_grant.user:
user = old_grant.user.user
if new_grant.reservation.user != user:
return True
if old_grant.starts_at != new_grant.starts_at:
return True
if old_grant.ends_at != new_grant.ends_at:
return True
return False
def grant_access(self, reservation):
assert reservation.resource == self.resource
grant = AccessControlGrant(
resource=self, reservation=reservation, state=AccessControlGrant.REQUESTED
)
grant.starts_at, grant.ends_at = self.pad_start_and_end_times(reservation.begin, reservation.end)
with transaction.atomic():
existing_grants = reservation.access_control_grants\
.filter(resource=self).active().select_related('resource').select_for_update()
old_grant = None
assert len(existing_grants) <= 1
if existing_grants:
old_grant = existing_grants[0]
if not self._grant_has_changed(old_grant, grant):
return old_grant
else:
old_grant.cancel()
grant.save()
self.system.prepare_install_grant(grant)
return grant
def revoke_access(self, reservation):
assert reservation.resource == self.resource
with transaction.atomic():
existing_grants = reservation.access_control_grants\
.filter(resource=self).active().select_related('resource').select_for_update()
assert len(existing_grants) <= 1
if not existing_grants:
return
grant = existing_grants[0]
grant.cancel()
def driver_identifier(self):
return self.system.get_resource_identifier(self)
def active_grant_count(self):
return self.grants.active().count()
class AccessControlSystem(models.Model):
name = models.CharField(max_length=100, unique=True)
driver = models.CharField(max_length=30, choices=[(x[0], x[1]) for x in DRIVERS])
reservation_leeway = models.PositiveIntegerField(
null=True, blank=True, verbose_name=_('reservation leeway'),
help_text=_('How many minutes before and after the reservation the access will be allowed')
)
driver_config = JSONField(null=True, blank=True, help_text=_('Driver-specific configuration'))
driver_data = JSONField(null=True, editable=False, help_text=_('Internal driver data'))
# Cached driver instance
_driver = None
def clean(self):
driver = self._get_driver()
driver.validate_system_config(self.driver_config)
def __str__(self) -> str:
return "{name} ({driver})".format(name=self.name, driver=self.driver)
def _get_driver(self):
if self._driver is not None:
return self._driver
driver_class = driver_classes.get(self.driver)
if driver_class is None:
for name, verbose_name, driver_path in DRIVERS:
if name == self.driver:
break
else:
raise ImproperlyConfigured("Driver %s not found" % self.driver)
driver_class = import_string(driver_path)
driver_classes[self.driver] = driver_class
self._driver = driver_class(self)
return self._driver
def prepare_install_grant(self, grant: AccessControlGrant):
self._get_driver().prepare_install_grant(grant)
def prepare_remove_grant(self, grant: AccessControlGrant):
self._get_driver().prepare_remove_grant(grant)
def install_grant(self, grant: AccessControlGrant):
self._get_driver().install_grant(grant)
def remove_grant(self, grant: AccessControlGrant):
self._get_driver().remove_grant(grant)
def get_system_config_schema(self):
return self._get_driver().get_system_config_schema()
def get_resource_config_schema(self):
return self._get_driver().get_resource_config_schema()
def get_resource_identifier(self, resource: AccessControlResource):
return self._get_driver().get_resource_identifier(resource)
def save_respa_resource(self, resource: AccessControlResource, respa_resource: Resource):
"""Notify driver about saving a Respa resource
Allows for driver-specific customization of the Respa resource or the
corresponding access control resource. Called when the Respa resource object is saved.
NOTE: The driver must not call `respa_resource.save()`. Saving the resource
is handled automatically later.
"""
self._get_driver().save_respa_resource(resource, respa_resource)
def save_resource(self, resource: AccessControlResource):
"""Notify driver about saving an access control resource
Allows for driver-specific customization of the access control resource or the
corresponding Respa resource. Called when the access control resource is saved.
"""
self._get_driver().save_resource(resource)
|
479105
|
import logging
import re
import warnings
import numpy as np
import pandas as pd
idl2np_dtype = {1: np.byte, 2: np.int16, 3: np.int32, 4: np.float32,
5: np.float64}
idl2struct = {4: 'f', 5:'d'}
archtype2struct={'sparc': None, 'bigend': '>', 'litend': '<',
'alpha': None, 'ppc': None, 'x86': None, 'x86_64': None}
class ReadBin52(object):
"""
Class to read a bin 52 and organize the output
Attributes
----------
bin52data : ndarray
The raw, n-dimensional KRC data
casesize : int
The number of bytes in each KRC case
date : bytes
The date the read file was created
ddd : ndarray
Raw temperature data
ddd_pd : dataframe
A pandas dataframe of surface temperature data
filename : str
The name of the KRC file that was input
header : list
The KRC header unpack to a list of ints
headerlength : int
The length, in bytes, of the KRC header
ncases : int
The number of different cases the model was run for
ndim : int
TBD
ndx : int
The number of indices for a single KRC run
nhours : int
The number of hour bins per 24 Mars hours
nlats : int
The number of valid, non-zero latitude bins
nseasons : int
The number of seasons the model was run for
nvariables : int
The number of variables contained within the KRC
lookup table
structdtype : str
        String describing the endianness and data type
temperature_data : dataframe
A multi-indexed dataframe of surface temperatures
transferedlayers : int
        The number of KRC transferred layers
version : list
        The KRC version used to create the file in the form
[major, minor, release]
words_per_krc : int
        The number of words per KRC entry
"""
def __init__(self, filename, headerlen=512):
"""
Parameters
----------
filename : str
The file to read
headerlength : int
The length, in bytes, of the text header.
Default: 512
"""
# Get or setup the logging object
self.logger = logging.getLogger(__name__)
self.filename = filename
self.readbin5(headerlen)
print(self.ncases)
assert(self.ncases == self.bin52data.shape[0])
def definekrc(self, what='KRC', endianness='<'):
"""
Defines a custom binary data structure for the KRC files.
"""
if what == 'KRC':
numfd = 96 # Size of floats of T-dependent materials
numid = 40 # size of " " integers
numld = 20 # size of " " logicals
maxn1 = 30 # dimension of layers
maxn2 = 384 * 4 # dimensions of times of day
maxn3 = 16 # dimensions of iterations days
maxn4 = self.nlats * 2 - 1 # dimensions of latitudes
maxn5 = 161 # dimensions of seasons
maxn6 = 6 # dimensions of saved years
maxnh = self.nhours # dimensions of saved times of day
maxbot = 6 # dimensions of time divisions
numtit = 20 # number of 4-byte words in TITLE
numday = 5 # number of 4-byte words in DAY
e = endianness
self.logger.debug(self.structdtype)
#Define the structure of the KRC file
if self.structdtype == '<f':
self._krcstructure= np.dtype([('fd','{}{}f'.format(e, numfd)),
('id','{}{}i'.format(e, numid)),
('ld','{}{}i'.format(e, numld)),
('title','{}{}a'.format(e, 4 * numtit)),
('daytime','{}{}a'.format(e, 4 * numday)),
('alat','{}{}f4'.format(e, maxn4)),
('elev','{}{}f4'.format(e,maxn4))])
elif self.structdtype == '<d':
self._krcstructure = np.dtype([('fd','{}{}d'.format(e, numfd)),
('alat','{}{}d'.format(e, maxn4)),
('elev','{}{}d'.format(e,maxn4) ),
('id','{}{}i'.format(e, numid)),
('ld','{}{}i'.format(e, numld)),
('title','{}{}a'.format(e, 4 * numtit) ),
('daytime','{}{}a'.format(e, 4 * numday))])
def readbin5(self, headerlen):
"""
Reads the type 52 file containing KRC output.
Tested with KRC version 2.2.2. Note that the output format
can change
Parameters
----------
        headerlen : int
            The length, in bytes, of the text header
"""
def _parse_header():
            header = re.findall(rb'\d+', fullheader.split(b'<<')[0])
header = list(map(int, header))
self.ndim = header[0]
self.nhours = header[1]
self.nvariables = header[2]
self.nlats = header[3]
self.nseasons = header[4] - 1
self.headerlength = header[8]
self.ncases = header[0 + self.ndim]
self.header = header
print(self.header)
# Compute how large each case is
self.casesize = self.nhours
if self.ndim > 1:
for k in range(1, self.ndim - 1):
self.casesize *= header[k + 1]
def _parse_front():
# Read the front matter
            front = np.fromfile(bin5, dtype=self.structdtype, count=4).astype(int)
self.words_per_krc = front[0]
self.ndx = front[2]
def _read_data():
bin5.seek(self.headerlength)
self.bin52data = np.fromfile(bin5,
dtype=self.structdtype)
self.logger.debug(len(self.bin52data))
indices = arraysize[1: -1]
self.bin52data = self.bin52data.reshape(indices[: : -1])
def _read_metadata():
if self.structdtype == '<f':
j = self.headerlength + 16 # Skip header plus 1 16bit entry
elif self.structdtype == '<d':
j = self.headerlength + 32 # Skip header plus 1 32bit entry
bin5.seek(j)
self.definekrc('KRC')
structarr = np.fromfile(bin5, dtype=self._krcstructure, count=1)
if self.structdtype == '<f':
self.structarr = {'fd': structarr[0][0],
'id': structarr[0][1],
'ld': structarr[0][2],
'title': structarr[0][3],
'date': structarr[0][4],
'alat': structarr[0][5],
'elevation': structarr[0][6]
}
elif self.structdtype == '<d':
self.structarr = {'fd': structarr[0][0],
'alat': structarr[0][1],
'elevation': structarr[0][2],
'id': structarr[0][3],
'ld': structarr[0][4],
'title': structarr[0][5],
'date':structarr[0][6]}
def _get_version():
            ver = re.findall(rb'\d+', head)
ver = list(map(int, ver))
self.version = ver[: 3]
with open(self.filename, 'rb') as bin5:
#To handle endianness and architectures
archbytes = 8
c_end = 5
fullheader = bin5.read(headerlen)
_parse_header()
print(self.header)
arraysize = self.header[0: self.ndim + 2]
arraydtypecode = arraysize[arraysize[0] + 1]
try:
arraydtype = idl2np_dtype[arraydtypecode]
self.logger.debug("Dtype: ", arraydtype)
except KeyError:
self.logger.error("Unable to determine input datatype.")
assert(len(self.header) == self.ndim + 4)
if self.headerlength > 512:
                warnings.warn('Expected header to be 512 bytes, is {} bytes'.format(self.headerlength))
return
#Get the endianness of the input file and the data type (32 or 64 bit)
archstart = self.headerlength - (archbytes + c_end)
archend = self.headerlength - c_end
encodingarch = fullheader[archstart: archend].rstrip()
encodingarch = encodingarch.decode()
self._endianness = archtype2struct[encodingarch]
self.structdtype = self._endianness + idl2struct[arraydtypecode]
#Get the date and head debug
idx2 = fullheader.find(b'>>')
idx1 = idx2 - 21
self.date = fullheader[idx1: idx1 + 20]
head = fullheader[idx2 + 2: self.headerlength - (archbytes + c_end) - 3 - idx2]
head = head.rstrip()
# Parse the header
_get_version()
_parse_front()
_read_data()
_read_metadata()
def readcase(self, case=0):
"""
Read a single dimension (case) from a bin52 file.
Parameters
-----------
case : int
The case to be extracted
"""
def latitems2dataframe():
"""
Converts Latitude items to a dataframe
"""
columns = ['# Days to Compute Soln.',
'RMS Temp. Change on Last Day',
'Predicted Final Atmospheric Temp.',
'Predicted frost amount, [kg/m^2]',
'Frost albedo (at the last time step)',
'Mean upward heat flow into soil surface on last day, [W/m^2]']
# Grab the correct slice from the data cube and reshape
latitems = layeritems[: ,: ,: ,: ,0: 3].reshape(self.ncases,
self.nseasons,
self.nlats, len(columns))
# Multi-index generation
idxcount = self.nseasons * self.nlats * self.ncases
idxpercase = self.nseasons * self.nlats
caseidx = np.empty(idxcount)
for c in range(self.ncases):
start = c * idxpercase
caseidx[start:start+idxpercase] = np.repeat(c, idxpercase)
nseasvect = np.arange(self.nseasons)
seasonidx = np.repeat(np.arange(self.nseasons), self.nlats)
latidx = np.tile(self.latitudes.values.ravel(), self.nseasons)
# Pack the dataframe
self.latitude = pd.DataFrame(latitems.reshape(self.nseasons * self.nlats, -1),
index=[caseidx, seasonidx, latidx],
columns=columns)
self.latitude.index.names = ['Case','Season' ,'Latitude']
def layer2dataframe():
"""
Converts layeritems into
"""
columns = ['Tmin', 'Tmax']
ddd = layeritems[: ,: ,: ,: ,3: 3 + self.transferedlayers].reshape(self.ncases,
self.nseasons,
self.nlats,
len(columns),
self.transferedlayers)
idxcount = self.nseasons * self.nlats * self.transferedlayers * self.ncases
caseidx = np.empty(idxcount)
idxpercase = self.nseasons * self.nlats * self.transferedlayers
for c in range(self.ncases):
start = c * idxpercase
caseidx[start:start + idxpercase] = np.repeat(c, idxpercase)
            seasonidx = np.repeat(np.arange(self.nseasons), idxcount // self.nseasons // self.ncases)
            nlatidx = np.repeat(self.latitudes.values.ravel(), idxcount // self.transferedlayers // self.ncases)
tranlayeridx = np.tile(np.repeat(np.arange(self.transferedlayers), self.nlats), self.nseasons)
self.layers = pd.DataFrame(ddd.reshape(idxcount, -1),
columns=columns,
index=[caseidx, seasonidx, nlatidx, tranlayeridx])
self.layers.index.names = ['Case', 'Season', 'Latitude', 'Layer']
def latelv2dataframes():
"""
Convert the latitude and elevation arrays to dataframes
"""
#All latitudes
#Hugh made some change to the krcc format, but I cannot find documentation...
if self.structdtype == '<f':
alllats = krcc[:,prelatwords:].reshape(2, nlat_include_null, self.ncases)
elif self.structdtype == '<d':
alllats = krcc[:,96:170].reshape(2, nlat_include_null, self.ncases)
#Latitudes and elevations for each case
latelv = alllats[: ,0: nlat]
if latelv.shape[-1] == 1:
latelv = latelv[:,:,0]
self.latitudes = pd.DataFrame(latelv[0], columns=['Latitude'])
self.elevations = pd.DataFrame(latelv[1], columns=['Elevation'])
def season2dataframe():
columns = ['Current Julian date (offset from J2000.0)',
'Seasonal longitude of Sun (in degrees)',
'Current surface pressure at 0 elevation (in Pascals)',
'Mean visible opacity of dust, solar wavelengths',
'Global average columnar mass of frost [kg /m^2]']
# Build a dataframe of the seasonal information
seasitems = header[:, 4 + self.words_per_krc: k ].reshape(self.ncases,
len(columns),
self.nseasons)
caseidx = np.repeat(np.arange(self.ncases), self.nseasons)
seasonidx = np.repeat(np.arange(self.nseasons), self.ncases)
flt_seasitems = seasitems.reshape(len(columns),
self.ncases * self.nseasons)
self.seasons = pd.DataFrame(flt_seasitems.T,
index=[caseidx,seasonidx],
columns=columns)
self.seasons.index.names = ['Case', 'Season']
def hourly2dataframe():
"""
Converts the hourly 'ttt' vector to a
labelled Pandas dataframe.
"""
columns = ['Final Hourly Surface Temp.',
'Final Hourly Planetary Temp.',
'Final Hourly Atmospheric Temp.',
'Hourly net downward solar flux [W/m^2]',
'Hourly net downward thermal flux [W/m^2]']
ttt = self.bin52data[: ,self.ndx: ,: ,0: len(columns),: ].reshape(self.ncases,
self.nseasons,
self.nlats,
len(columns),
self.nhours)
reshapettt = np.swapaxes(ttt.reshape(self.ncases * self.nseasons * self.nlats,
len(columns),
self.nhours),1,2)
shp = reshapettt.shape
reshapettt = reshapettt.reshape((shp[0] * shp[1], shp[2])).T
#Indices
caseidx = np.repeat(np.arange(self.ncases), self.nseasons * self.nlats * self.nhours)
seasonidx = np.tile(np.repeat(np.arange(self.nseasons), self.nlats * self.nhours), self.ncases)
latidx = np.tile(np.repeat(self.latitudes.values.ravel(), self.nhours), self.nseasons)
houridx = np.tile(np.tile(np.tile(np.arange(self.nhours), self.nlats), self.nseasons), self.ncases)
#DataFrame
self.temperatures = pd.DataFrame(reshapettt.T,
index=[caseidx, seasonidx, latidx, houridx],
columns=columns)
self.temperatures.index.names = ['Case', 'Season', 'Latitude', 'Hour']
self.nlayers = nlayers = self.structarr['id'][0]
nlat = len(self.structarr['alat'])
self.transferedlayers = nlayers - 1
if self.nhours - 3 < self.transferedlayers:
self.transferedlayers = self.nhours - 3
wordsperlat = self.nhours * self.nvariables
wordsperseason = wordsperlat * self.nlats
# Each case has a header that must be extracted:
header = self.bin52data[:,0:self.ndx,: ,: ,: ].reshape(self.ncases,
wordsperseason * self.ndx)
k = 4 + self.words_per_krc + 5 * self.nseasons
krcc = header[:, 4: 4 + self.words_per_krc]
nlat_include_null = len(self.structarr['alat'])
prelatwords = self.words_per_krc - 2 * nlat_include_null
wordspercase = wordsperseason * self.nseasons
# Extract the latitude and elevation metadata
latelv2dataframes()
#Extract the hourly temperature data
hourly2dataframe()
# Extract the seasons
season2dataframe()
# Extract by layer data from the data cube
layeritems = self.bin52data[: , self.ndx: , : , 5: 7, : ]
latitems2dataframe()
layer2dataframe()
class ReadTds(object):
def __init__(self, filename, headerlength=512):
"""
Parameters
----------
filename : str
The file to read
headerlength : int
The length, in bytes, of the text header.
Default: 512
"""
# Get or setup the logging object
self.logger = logging.getLogger(__name__)
self.filename = filename
        self.readbin5(headerlength)
        self.logger.debug(self.ncases)
assert(self.ncases == self.bin52data.shape[0])
|
479119
|
# Candidate words for the prefix-extension analysis; a prefix that equals a full word ends that branch.
whitelist = ["МОРОКА", "МОРС", "МОРОЗ", "ПЛАХА", "ПЛАТЬЕ", "ПЛОМБА"]
def analyze(s, pref):
    # Returns (flag, lines): flag is True when some one-letter extension of `s`
    # leads to a position whose own flag is False; `pref` is the printable path so far.
    if s in whitelist:
        return False, [f"{pref:<6} {s} - LOSING"]
    results = []
    for word in whitelist:
        if word.startswith(s):
            # Extend the prefix by the next letter of `word` and analyse the resulting position.
            result = analyze(s + word[len(s)], f"{pref:<6} --{word:<6}-->")
            if not result[0]:
                return True, result[1]
            results += result[1]
    return False, results
for line in analyze("", "")[1]:
print(line)
|
479146
|
from ..factory import Type
class passportElementsWithErrors(Type):
elements = None # type: "vector<PassportElement>"
errors = None # type: "vector<passportElementError>"
|
479167
|
class Cheat:
def __init__(self, name: str):
self.name = name
self.is_running = False
def set_is_running(self, is_running: bool):
self.is_running = is_running
|
479174
|
from gb_utils.greenberry_lex import GreenBerryLex
from symbols import *
from debug_cp import *
import inspect
MATH_OPS = ["+", "-", "*", "/"]
BOOLS = [S.TRUE, S.FALSE]
BOOL_OPS = [S.GREATER, S.LESS]
EOS = [S.NL, S.EOF]
def greenberry_lex_test(x, expected):
KWDs = [
getattr(S, i)
for i in [
b[0]
for b in [
a
for a in inspect.getmembers(S, lambda a: not inspect.isroutine(a))
if not (a[0].startswith("__") and a[0].endswith("__"))
]
]
]
words = GreenBerryLex.lex(x, KWDs, add_eof=1)
print("\033[1mWords:\033[0m")
is_correct = True
j = 0
for i in words:
print(i)
if not i == expected[j]:
print("\x1b[31m This token is unexpected.\x1b[39m")
is_correct = False
j += 1
return is_correct
def greenberry_lex_tester(to_lex, *args):
l_args = list(args)
l_args.append("{***end-of-file***}")
result = greenberry_lex_test(to_lex, l_args)
if result:
print("\x1b[32m Test passed \x1b[39m")
else:
print("\x1b[31m Test failed \x1b[39m")
return result
def greenberry_multi_tests(*args):
result = True
for i in args:
cur = greenberry_lex_tester(i["test"], *i["expected"])
if not cur:
result = False
if result:
print("\x1b[32m All tests passed. \x1b[39m")
else:
print("\x1b[31m A test failed. \x1b[39m")
greenberry_multi_tests({
"test": "print \"hi\"",
"expected": ["print", "\"hi\""]
},
{
"test": "print string hi",
"expected": ["print", "string", "hi"]
},
{
"test": "5 * 3 + (3 / 1)",
"expected": ["5", "*", "3", "+", "(", "3", "/", "1", ")"]
},
{
"test": "for 3 times: print greenBerry",
"expected": ["for", "3", "times", ":", "print", "greenBerry"]
},
{
"test": "var y = @ x",
"expected": ["var", "y", "=", "@", "x"]
})
|
479192
|
from .frame import Frame
from .parser import Parser
from .settings import *
class Traverser:
def __init__(self, frames, mapping):
self.frames = frames
self.mapping = mapping
# Finds node within XP, for which negation may produce a reasonable goal with the returned operator
# Returns: (frame, operator, effect) triplet if an operator, frame, and effect are found which remedies the anomaly. If none are found, returns (None, None, None)
def traverse(self):
# Want search in a BFS manner
queue = [] # queue for BFS
# Find anomaly center
center = None
for f in list(self.frames.values()):
if f.iscenter:
center = f
covered = [] # do not re-search nodes
queue.append(center)
while len(queue) > 0:
node = queue.pop(0)
covered.append(node)
for role in list(node.roles.values()):
for rn in set(role.facetvalue + role.facetrelation):
if rn not in covered and rn not in queue:
queue.append(rn)
if node.name in list(self.mapping.keys()):
for [operator, effect] in self.mapping[node.name]:
if self.evaluate(node, effect):
return (node, operator, effect)
return (None, None, None)
# Evaluates the result of applying effect to node within frames.
# Returns: True for good (remedies anomaly), False for bad
def evaluate(self, node, effect):
if effect == OPERATOR_EFFECT_NEGATION:
negated = [node]
# Find nodes which depend on negated nodes, negate them.
# Stop when either the center node is negated, or no more nodes can be negated.
# Assumption: All edges indicate dependencies. This is invalid and will need to be updated.
expanded = True
while expanded:
                expanded = False
for frame in list(self.frames.values()):
if not frame in negated:
for role in list(frame.roles.values()):
for rn in set(role.facetvalue + role.facetrelation):
if rn in negated and not frame in negated:
if frame.iscenter:
return True
negated.append(frame)
expanded = True
return False
if __name__ == "__main__":
# get text
f = open("../output.txt", "r")
text = f.read()
f.close()
# parse text
p = Parser()
frames = p.makeframegraph(text)
# create mapping
noem = {} # Node Operator Effect Mapping
# Keys are node/frame names, values are lists of [operatorname, effect] pairs
noem['CRIMINAL-VOLITIONAL-AGENT.4697'] = [['apprehend', OPERATOR_EFFECT_NEGATION]]
# Traverse
t = Traverser(frames, noem)
(frame, operator, effect) = t.traverse()
print("Frame: " + frame.name)
print("Operator: " + operator)
print("Effect: " + str(effect))
|
479194
|
import logging
import re
from plane import replace
from plane.pattern import EMAIL, TELEPHONE
from tqdm import tqdm
from dbpunctuator.utils import (
CURRENCY,
CURRENCY_TOKEN,
EMAIL_TOKEN,
NUMBER,
NUMBER_TOKEN,
TELEPHONE_TOKEN,
URL,
URL_TOKEN,
)
tqdm.pandas()
logger = logging.getLogger(__name__)
def dataframe_data_cleaning(
df, target_col, kept_punctuations, removed_punctuations, *special_cleaning_funcs
):
"""
Clean up data in dataframe by removing all special characters except kept ones.
"""
if special_cleaning_funcs:
for func in special_cleaning_funcs:
df[target_col] = df[target_col].progress_apply(lambda x: func(x))
    # replace emails with <EMAIL> and urls with <URL>
logger.info("replace email with <EMAIL>")
df[target_col] = df[target_col].progress_apply(
lambda x: replace(x, EMAIL, EMAIL_TOKEN)
)
logger.info("replace url with <URL>")
df[target_col] = df[target_col].progress_apply(lambda x: replace(x, URL, URL_TOKEN))
logger.info("replace currency with <CURRENCY>")
df[target_col] = df[target_col].progress_apply(
lambda x: replace(x, CURRENCY, CURRENCY_TOKEN)
)
logger.info("replace telephone with <TEL>")
df[target_col] = df[target_col].progress_apply(
lambda x: replace(x, TELEPHONE, TELEPHONE_TOKEN)
)
logger.info("replace number with <NUM>")
df[target_col] = df[target_col].progress_apply(
lambda x: replace(x, NUMBER, NUMBER_TOKEN)
)
translator = str.maketrans({key: None for key in removed_punctuations})
space_translator = str.maketrans(
{key: " {0} ".format(chr(key)) for key in kept_punctuations}
)
df[target_col] = df[target_col].progress_apply(
lambda x: x.translate(translator).translate(space_translator).strip()
)
    df = df.dropna(subset=[target_col])  # dropna returns a new frame; assign it back
return df
def text_lines_cleaning(
input_lines, kept_punctuations, removed_punctuations, *special_cleaning_funcs
):
logger.info("clean up text file line by line.")
logger.info("replace email with <EMAIL>")
logger.info("replace url with <URL>")
logger.info("replace currency with <CURRENCY>")
logger.info("replace telephone with <TEL>")
logger.info("replace number with <NUM>")
pbar = tqdm(input_lines)
for line in pbar:
if special_cleaning_funcs:
for func in special_cleaning_funcs:
try:
line = func(line)
except Exception as err:
logger.warning(f"error {str(err)} with func {func} for line {line}")
line = replace(line, EMAIL, EMAIL_TOKEN)
line = replace(line, URL, URL_TOKEN)
line = replace(line, CURRENCY, CURRENCY_TOKEN)
line = replace(line, TELEPHONE, TELEPHONE_TOKEN)
line = replace(line, NUMBER, NUMBER_TOKEN)
translator = str.maketrans({key: None for key in removed_punctuations})
space_translator = str.maketrans(
{key: " {0} ".format(chr(key)) for key in kept_punctuations}
)
yield line.translate(translator).translate(space_translator).strip()
pbar.close()
def cleaning_validator(text, kept_punctuations, removed_punctuations):
regex = re.compile(
"[{}]".format("|".join(map(re.escape, [chr(p) for p in removed_punctuations])))
)
checking_result = regex.search(text)
assert (
checking_result is None
or text[checking_result.span()[0] : checking_result.span()[1]]
in kept_punctuations
), f"data cleaning for `{text}`` doesn't pass the validation with {checking_result}"
return True
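# Minimal usage sketch for dataframe_data_cleaning; the column name, sample sentence and
# punctuation choices below are illustrative, not part of dbpunctuator itself. Note that the
# kept/removed punctuations are code points (ints), matching the chr(...)-based translation
# tables built above.
if __name__ == "__main__":
    import pandas as pd

    sample = pd.DataFrame({"text": ["Write to [email protected] -- tickets cost $5, really!"]})
    kept_punctuations = {ord(","), ord("!")}      # kept and padded with spaces
    removed_punctuations = {ord("-"), ord("$")}   # stripped entirely
    cleaned = dataframe_data_cleaning(sample, "text", kept_punctuations, removed_punctuations)
    print(cleaned["text"].iloc[0])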
|
479214
|
import logging
import os
import platform
import numpy as np
import pybullet as p
import yaml
import igibson
from igibson.action_primitives.action_primitive_set_base import ActionPrimitiveError
from igibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives
from igibson.objects.articulated_object import URDFObject
from igibson.robots.behavior_robot import BehaviorRobot
from igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene
from igibson.simulator import Simulator
from igibson.utils.assets_utils import get_ig_avg_category_specs, get_ig_model_path
from igibson.utils.utils import parse_config
def execute_controller(ctrl_gen, robot, s):
for action in ctrl_gen:
robot.apply_action(action)
s.step()
def go_to_sink_and_toggle(s, robot, controller: StarterSemanticActionPrimitives):
"""Go to the sink object in the scene and toggle it on."""
for i in range(20):
try:
sink = s.scene.objects_by_category["sink"][1]
print("Trying to NAVIGATE_TO sink.")
execute_controller(controller._navigate_to_obj(sink), robot, s)
print("NAVIGATE_TO sink succeeded!")
print("Trying to TOGGLE_ON the sink.")
execute_controller(controller.toggle_on(sink), robot, s)
print("TOGGLE_ON the sink succeeded!")
except ActionPrimitiveError:
print("Attempt {} to navigate and toggle on the sink failed. Retry until 20.".format(i + 1))
continue
return
def grasp_tray(s, robot, controller: StarterSemanticActionPrimitives):
"""Grasp the tray that's on the floor of the room."""
for i in range(20):
try:
print("Trying to GRASP tray.")
tray = s.scene.objects_by_category["tray"][0]
execute_controller(controller.grasp(tray), robot, s)
print("GRASP the tray succeeded!")
except ActionPrimitiveError:
print("Attempt {} to grasp the tray failed. Retry until 20.".format(i + 1))
continue
return
def put_on_table(s, robot, controller: StarterSemanticActionPrimitives):
"""Place the currently-held object on top of the coffee table."""
for i in range(20):
try:
print("Trying to PLACE_ON_TOP the held object on coffee table.")
table = s.scene.objects_by_category["coffee_table"][0]
execute_controller(controller.place_on_top(table), robot, s)
print("PLACE_ON_TOP succeeded!")
except ActionPrimitiveError:
print("Attempt {} to place the held object failed. Retry until 20.".format(i + 1))
continue
return
def open_and_close_fridge(s, robot, controller: StarterSemanticActionPrimitives):
"""Demonstrate opening and closing the fridge."""
for i in range(20):
try:
fridge = s.scene.objects_by_category["fridge"][0]
print("Trying to OPEN the fridge.")
execute_controller(controller.open(fridge), robot, s)
print("OPEN the fridge succeeded!")
print("Trying to CLOSE the fridge.")
execute_controller(controller.close(fridge), robot, s)
print("CLOSE the fridge succeeded!")
except ActionPrimitiveError:
print("Attempt {} to open and close the fridge failed. Retry until 20.".format(i + 1))
continue
return
def open_and_close_door(s, robot, controller: StarterSemanticActionPrimitives):
"""Demonstrate opening and closing the bathroom door."""
for i in range(20):
try:
door = (set(s.scene.objects_by_category["door"]) & set(s.scene.objects_by_room["bathroom_0"])).pop()
print("Trying to OPEN the door.")
execute_controller(controller.open(door), robot, s)
print("Trying to CLOSE the door.")
execute_controller(controller.close(door), robot, s)
print("CLOSE the door succeeded!")
except ActionPrimitiveError:
print("Attempt {} to open and close the door failed. Retry until 20.".format(i + 1))
continue
return
def open_and_close_cabinet(s, robot, controller: StarterSemanticActionPrimitives):
"""Demonstrate opening and closing a drawer unit."""
for i in range(20):
try:
cabinet = s.scene.objects_by_category["bottom_cabinet"][2]
print("Trying to OPEN the cabinet.")
execute_controller(controller.open(cabinet), robot, s)
print("Trying to CLOSE the cabinet.")
execute_controller(controller.close(cabinet), robot, s)
print("CLOSE the cabinet succeeded!")
except ActionPrimitiveError:
print("Attempt {} to open and close the cabinet failed. Retry until 20.".format(i + 1))
continue
return
def main(selection="user", headless=False, short_exec=False):
"""
Launches a simulator scene and showcases a variety of semantic action primitives such as navigation, grasping,
placing, opening and closing.
"""
print("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
# Create the simulator.
s = Simulator(
mode="headless" if headless else "gui_non_interactive" if platform.system() != "Darwin" else "gui_interactive",
image_width=512,
image_height=512,
device_idx=0,
use_pb_gui=(not headless and platform.system() != "Darwin"),
)
scene = InteractiveIndoorScene(
"Rs_int", load_object_categories=["walls", "floors", "bottom_cabinet", "door", "sink", "coffee_table", "fridge"]
)
s.import_scene(scene)
# Create a custom tray object for the grasping test.
model_path = get_ig_model_path("tray", "tray_000")
model_filename = os.path.join(model_path, "tray_000.urdf")
avg_category_spec = get_ig_avg_category_specs()
tray = URDFObject(
filename=model_filename,
category="tray",
name="tray",
avg_obj_dims=avg_category_spec.get("tray"),
fit_avg_dim_volume=True,
model_path=model_path,
)
s.import_object(tray)
tray.set_position_orientation([0, 1, 0.3], p.getQuaternionFromEuler([0, np.pi / 2, 0]))
# Load the robot and place it in the scene.
config = parse_config(os.path.join(igibson.configs_path, "behavior_robot_mp_behavior_task.yaml"))
config["robot"]["show_visual_head"] = True
robot = BehaviorRobot(**config["robot"])
s.import_robot(robot)
robot.set_position_orientation([0, 0, 1], [0, 0, 0, 1])
robot.apply_action(
np.zeros(
robot.action_dim,
)
)
# Run some steps to let physics settle.
for _ in range(300):
s.step()
# Create an Action Primitive Set and use it to convert high-level actions to low-level actions and execute.
controller = StarterSemanticActionPrimitives(None, scene, robot)
try:
# The pick-and-place demo is always run.
grasp_tray(s, robot, controller)
put_on_table(s, robot, controller)
# The other demos are only run in the long execution mode.
if not short_exec:
go_to_sink_and_toggle(s, robot, controller)
open_and_close_fridge(s, robot, controller)
open_and_close_door(s, robot, controller)
open_and_close_cabinet(s, robot, controller)
# If we're not running in headless mode, let the simulator run idle after we are done to allow user to inspect.
if not headless:
while True:
action = np.zeros(robot.action_dim)
robot.apply_action(action)
s.step()
finally:
s.disconnect()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
|
479242
|
import rdkit
from rdkit.Chem import rdMolTransforms
from rdkit.Chem import TorsionFingerprints
import numpy as np
import networkx as nx
import random
atomTypes = ['H', 'C', 'B', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'Br', 'I']
formalCharge = [-1, -2, 1, 2, 0]
degree = [0, 1, 2, 3, 4, 5, 6]
num_Hs = [0, 1, 2, 3, 4]
local_chiral_tags = [0, 1, 2, 3]
hybridization = [
rdkit.Chem.rdchem.HybridizationType.S,
rdkit.Chem.rdchem.HybridizationType.SP,
rdkit.Chem.rdchem.HybridizationType.SP2,
rdkit.Chem.rdchem.HybridizationType.SP3,
rdkit.Chem.rdchem.HybridizationType.SP3D,
rdkit.Chem.rdchem.HybridizationType.SP3D2,
rdkit.Chem.rdchem.HybridizationType.UNSPECIFIED,
]
bondTypes = ['SINGLE', 'DOUBLE', 'TRIPLE', 'AROMATIC']
def one_hot_embedding(value, options):
embedding = [0]*(len(options) + 1)
index = options.index(value) if value in options else -1
embedding[index] = 1
return embedding
def adjacency_to_undirected_edge_index(adj):
adj = np.triu(np.array(adj, dtype = int)) #keeping just upper triangular entries from sym matrix
array_adj = np.array(np.nonzero(adj), dtype = int) #indices of non-zero values in adj matrix
edge_index = np.zeros((2, 2*array_adj.shape[1]), dtype = int) #placeholder for undirected edge list
edge_index[:, ::2] = array_adj
edge_index[:, 1::2] = np.flipud(array_adj)
return edge_index
def get_all_paths(G, N = 3):
# adapted from: https://stackoverflow.com/questions/28095646/finding-all-paths-walks-of-given-length-in-a-networkx-graph
def findPaths(G,u,n):
if n==0:
return [[u]]
paths = [[u]+path for neighbor in G.neighbors(u) for path in findPaths(G,neighbor,n-1) if u not in path]
return paths
allpaths = []
for node in G:
allpaths.extend(findPaths(G,node,N))
return allpaths
def getNodeFeatures(list_rdkit_atoms, owningMol):
F_v = (len(atomTypes)+1) +\
(len(degree)+1) + \
(len(formalCharge)+1) +\
(len(num_Hs)+1)+\
(len(hybridization)+1) +\
2 + 4 + 5 # 52
global_tags = dict(rdkit.Chem.FindMolChiralCenters(owningMol, force=True, includeUnassigned=True, useLegacyImplementation=False))
node_features = np.zeros((len(list_rdkit_atoms), F_v))
for node_index, node in enumerate(list_rdkit_atoms):
features = one_hot_embedding(node.GetSymbol(), atomTypes) # atom symbol, dim=12 + 1
features += one_hot_embedding(node.GetTotalDegree(), degree) # total number of bonds, H included, dim=7 + 1
features += one_hot_embedding(node.GetFormalCharge(), formalCharge) # formal charge, dim=5+1
features += one_hot_embedding(node.GetTotalNumHs(), num_Hs) # total number of bonded hydrogens, dim=5 + 1
features += one_hot_embedding(node.GetHybridization(), hybridization) # hybridization state, dim=7 + 1
features += [int(node.GetIsAromatic())] # whether atom is part of aromatic system, dim = 1
features += [node.GetMass() * 0.01] # atomic mass / 100, dim=1
### chiral tags go last ###
#global chiral tag
idx = node.GetIdx()
global_chiral_tag = 0
if idx in global_tags:
if global_tags[idx] == 'R':
global_chiral_tag = 1
elif global_tags[idx] == 'S':
global_chiral_tag = 2
else:
global_chiral_tag = -1
features += one_hot_embedding(global_chiral_tag, [0,1,2]) # chiral tag of atom, dim=3+1 (global chiral features)
#local chiral tag
features += one_hot_embedding(node.GetChiralTag(), local_chiral_tags) # chiral tag of atom, dim=4+1 (local chiral features)
node_features[node_index,:] = features
return np.array(node_features, dtype = np.float32)
def getEdgeFeatures(list_rdkit_bonds):
F_e = (len(bondTypes)+1) + 2 + (6+1) # 14
edge_features = np.zeros((len(list_rdkit_bonds)*2, F_e))
for edge_index, edge in enumerate(list_rdkit_bonds):
features = one_hot_embedding(str(edge.GetBondType()), bondTypes) # dim=4+1
features += [int(edge.GetIsConjugated())] # dim=1
features += [int(edge.IsInRing())] # dim=1
features += one_hot_embedding(edge.GetStereo(), list(range(6))) #dim=6+1
# Encode both directed edges to get undirected edge
edge_features[2*edge_index: 2*edge_index+2, :] = features
return np.array(edge_features, dtype = np.float32)
def getInternalCoordinatesFromAllPaths(mol, adj, repeats = False):
if isinstance(mol, rdkit.Chem.rdchem.Conformer):
conformer = mol
if isinstance(mol, rdkit.Chem.rdchem.Mol):
conformer = mol.GetConformer()
graph = nx.from_numpy_matrix(adj, parallel_edges=False, create_using=None)
distance_paths, angle_paths, dihedral_paths = get_all_paths(graph, N = 1), get_all_paths(graph, N = 2), get_all_paths(graph, N = 3)
if len(dihedral_paths) == 0:
raise Exception('No Dihedral Angle Detected')
bond_distance_indices = np.array(distance_paths, dtype = int)
bond_angle_indices = np.array(angle_paths, dtype = int)
dihedral_angle_indices = np.array(dihedral_paths, dtype = int)
if not repeats: # only taking (0,1) vs. (1,0); (1,2,3) vs (3,2,1); (1,3,6,7) vs (7,6,3,1)
bond_distance_indices = bond_distance_indices[bond_distance_indices[:, 0] < bond_distance_indices[:, 1]]
bond_angle_indices = bond_angle_indices[bond_angle_indices[:, 0] < bond_angle_indices[:, 2]]
dihedral_angle_indices = dihedral_angle_indices[dihedral_angle_indices[:, 1] < dihedral_angle_indices[:, 2]]
bond_distances = np.array([rdMolTransforms.GetBondLength(conformer, int(index[0]), int(index[1])) for index in bond_distance_indices], dtype = np.float32)
bond_angles = np.array([rdMolTransforms.GetAngleRad(conformer, int(index[0]), int(index[1]), int(index[2])) for index in bond_angle_indices], dtype = np.float32)
dihedral_angles = np.array([rdMolTransforms.GetDihedralRad(conformer, int(index[0]), int(index[1]), int(index[2]), int(index[3])) for index in dihedral_angle_indices], dtype = np.float32)
return bond_distances, bond_distance_indices, bond_angles, bond_angle_indices, dihedral_angles, dihedral_angle_indices
def embedConformerWithAllPaths(rdkit_mol3D, repeats = False):
if isinstance(rdkit_mol3D, rdkit.Chem.rdchem.Conformer):
mol = rdkit_mol3D.GetOwningMol()
conformer = rdkit_mol3D
elif isinstance(rdkit_mol3D, rdkit.Chem.rdchem.Mol):
mol = rdkit_mol3D
conformer = mol.GetConformer()
# Edge Index
adj = rdkit.Chem.GetAdjacencyMatrix(mol)
edge_index = adjacency_to_undirected_edge_index(adj)
# Edge Features
bonds = []
for b in range(int(edge_index.shape[1]/2)):
bond_index = edge_index[:,::2][:,b]
bond = mol.GetBondBetweenAtoms(int(bond_index[0]), int(bond_index[1]))
bonds.append(bond)
edge_features = getEdgeFeatures(bonds)
# Node Features
atoms = rdkit.Chem.rdchem.Mol.GetAtoms(mol)
atom_symbols = [atom.GetSymbol() for atom in atoms]
node_features = getNodeFeatures(atoms, mol)
bond_distances, bond_distance_indices, bond_angles, bond_angle_indices, dihedral_angles, dihedral_angle_indices = getInternalCoordinatesFromAllPaths(conformer, adj, repeats = repeats)
return atom_symbols, edge_index, edge_features, node_features, bond_distances, bond_distance_indices, bond_angles, bond_angle_indices, dihedral_angles, dihedral_angle_indices
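# Illustrative end-to-end sketch of the featurization above, using ethanol as an arbitrary
# test molecule; AllChem.EmbedMolecule is only used to obtain a 3D conformer so that the
# internal-coordinate extraction has geometry to read. As in the code above, this relies on
# a networkx version that still provides from_numpy_matrix.
if __name__ == "__main__":
    from rdkit.Chem import AllChem

    mol = rdkit.Chem.AddHs(rdkit.Chem.MolFromSmiles("CCO"))
    AllChem.EmbedMolecule(mol, randomSeed=0xf00d)
    (symbols, edge_index, edge_features, node_features,
     distances, distance_idx, angles, angle_idx,
     dihedrals, dihedral_idx) = embedConformerWithAllPaths(mol)
    print(symbols)
    print(edge_index.shape, node_features.shape, edge_features.shape)
    print(len(distances), len(angles), len(dihedrals))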
|
479251
|
class TwoSum:
def __init__(self):
self.nums = {}
def add(self, number):
self.nums[number] = self.nums.get(number, 0) + 1
def find(self, value):
for num in self.nums:
if value - num in self.nums and (num != value - num or self.nums[num] > 1):
return True
return False
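# Small usage sketch with arbitrary values; because add() counts occurrences,
# a duplicated value can pair with itself (3 + 3 below).
if __name__ == "__main__":
    ts = TwoSum()
    for n in (3, 5, 3):
        ts.add(n)
    print(ts.find(6))   # True: 3 + 3
    print(ts.find(8))   # True: 3 + 5
    print(ts.find(10))  # False: would need two fives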
|
479254
|
from __future__ import absolute_import
import pytest
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core.context import Context
from aws_xray_sdk.ext.flask_sqlalchemy.query import XRayFlaskSqlAlchemy
from flask import Flask
from ...util import find_subsegment_by_annotation
app = Flask(__name__)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = XRayFlaskSqlAlchemy(app)
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255), nullable=False, unique=True)
fullname = db.Column(db.String(255), nullable=False)
password = db.Column(db.String(255), nullable=False)
@pytest.fixture(
params=[
False,
True,
],
)
def session(request):
"""Test Fixture to Create DataBase Tables and start a trace segment"""
xray_recorder.configure(service='test', sampling=False, context=Context(), stream_sql=request.param)
xray_recorder.clear_trace_entities()
xray_recorder.begin_segment('SQLAlchemyTest')
db.create_all()
yield
xray_recorder.end_segment()
xray_recorder.clear_trace_entities()
def test_all(capsys, session):
    """ Test calling all() to get all records.
    Verify that we capture a trace of the query and return the SQL as metadata"""
# with capsys.disabled():
User.query.all()
subsegment = find_subsegment_by_annotation(xray_recorder.current_segment(), 'sqlalchemy', 'sqlalchemy.orm.query.all')
assert subsegment['annotations']['sqlalchemy'] == 'sqlalchemy.orm.query.all'
assert subsegment['sql']['url']
assert bool(subsegment['sql'].get('sanitized_query', None)) is xray_recorder.stream_sql
def test_add(capsys, session):
    """ Test calling add() to insert a row.
    Verify that we capture a trace for the add"""
# with capsys.disabled():
john = User(name='John', fullname="<NAME>", password="password")
db.session.add(john)
subsegment = find_subsegment_by_annotation(xray_recorder.current_segment(), 'sqlalchemy', 'sqlalchemy.orm.session.add')
assert subsegment['annotations']['sqlalchemy'] == 'sqlalchemy.orm.session.add'
assert subsegment['sql']['url']
|
479283
|
from typing import Dict, Tuple, Union
from pyswip import Functor, Atom, Variable
from problog.logic import (
Term,
Constant,
list2term,
term2list,
Var,
is_list,
Clause,
And,
Or,
)
from problog.parser import PrologParser
from problog.program import ExtendedPrologFactory
PySwipObject = Union[Functor, Atom, Variable, int, float, list, bytes]
ProblogObject = Union[Term, Constant, Var, Clause, And, Or]
def pyswip_to_term(
pyswip_obj: PySwipObject, with_variables=False
) -> Union[ProblogObject, Tuple[ProblogObject, Dict[Var, Variable]]]:
variables = dict()
if type(pyswip_obj) is Functor:
args = []
for a in pyswip_obj.args:
args2, variables2 = pyswip_to_term(a, True)
args.append(args2)
variables.update(variables2)
# args = [pyswip_to_term(a) for a in pyswip_obj.args]
operator = pyswip_obj.name.get_value()
if operator == ":-":
new_term = Clause(*args)
elif operator == ",":
new_term = And.from_list(args)
elif operator == ";":
new_term = Or.from_list(args)
else:
new_term = Term(operator, *args)
elif type(pyswip_obj) is Atom:
new_term = Term(pyswip_obj.get_value())
elif type(pyswip_obj) is int or type(pyswip_obj) is float:
new_term = Constant(pyswip_obj)
elif type(pyswip_obj) is list:
lst = []
for o in pyswip_obj:
e, vars2 = pyswip_to_term(o, True)
lst.append(e)
variables.update(vars2)
new_term = list2term(lst)
elif type(pyswip_obj) is Variable:
new_term = Var(
pyswip_obj.chars if pyswip_obj.chars else f"Var{pyswip_obj.handle}"
)
variables = {new_term: pyswip_obj}
elif type(pyswip_obj) is bytes:
new_term = Constant(pyswip_obj.decode("utf-8"))
else:
raise Exception(
"Unhandled type {} from object {}".format(type(pyswip_obj), pyswip_obj)
)
if with_variables:
return new_term, variables
else:
return new_term
def term_to_pyswip(term: ProblogObject) -> PySwipObject:
if type(term) is Term:
if is_list(term):
return [term_to_pyswip(x) for x in term2list(term, False)]
args = [term_to_pyswip(arg) for arg in term.args]
if not args:
return Atom(term.functor)
functor = Functor(term.functor, arity=term.arity)
return functor(*args)
elif type(term) is Constant:
return term.functor
elif type(term) is Var:
return Variable(name=term.name)
else:
raise Exception(
"Unhandled type {} from object {} -> Robin has to fix it".format(
type(term), term
)
)
_parser = PrologParser(ExtendedPrologFactory())
def parse(to_parse: Union[str, PySwipObject]) -> ProblogObject:
if type(to_parse) is str:
return _parser.parseString(str(to_parse) + ".")[0]
return pyswip_to_term(to_parse)
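# Illustrative sketch: parsing a plain string only goes through problog's PrologParser,
# so no SWI-Prolog query is issued for this particular call; the clause text is arbitrary.
if __name__ == "__main__":
    clause = parse("grandparent(X, Z) :- parent(X, Y), parent(Y, Z)")
    print(type(clause).__name__, clause)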
|
479293
|
from maps.fixedkeymap import FixedKeyMap
from maps.frozenmap import FrozenMap
from maps.nameddict import NamedDict
from maps.namedfixedkeymap import NamedFixedKeyMapMeta
from maps.namedfrozenmap import NamedFrozenMapMeta
def namedfrozen(typename, fields, defaults={}):
'''Creates a new class that inherits from :class:`maps.FrozenMap` that has the
specified fields as keys. Fields are accessible via bracket-notation
(i.e. ``__getitem__``) as well as dot-notation (i.e. ``__getattr__``).
Instances of the returned class are immutable.
:param str typename: Name of the new Map class
:param iterable fields: Names of the fields
:param mapping defaults: Maps default values to fields
:raises ValueError: if the type name or field names or defaults provided are not properly formatted
:return: The newly created class
:rtype: class
Usage::
>>> import maps
>>> RGB = maps.namedfrozen('RGB', ['red', 'green', 'blue'], defaults={'green': 127, 'blue': 80})
>>> coral = RGB(255)
>>> coral['red']
255
>>> coral.green
127
'''
return NamedFrozenMapMeta(typename, fields, defaults)
def namedfixedkey(typename, fields, defaults={}):
'''Creates a new class that inherits from :class:`maps.FixedKeyMap` that has the
speciefied fields as keys. Fields are accessible via bracket-notation
(i.e. ``__getitem__``) as well as dot-notation (i.e. ``__getattr__``).
Instances of the returned class have a fixed set of keys, but the values
corresponding to those keys can be edited.
:param str typename: Name of the new Map class
:param iterable fields: Names of the fields
:param mapping defaults: Maps default values to fields
:raises ValueError: if the type name or field names or defaults provided are not properly formatted
:return: The newly created class
:rtype: class
Usage::
>>> import maps
>>> Person = maps.namedfixedkey('Person', ['name', 'gender', 'age'], defaults={'age': 40})
>>> bob = Person('bob', 'male')
>>> bob['name']
'bob'
>>> bob.gender
'male'
>>> bob.age += 1
>>> bob.age
41
'''
return NamedFixedKeyMapMeta(typename, fields, defaults)
|
479295
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import str # pylint: disable=redefined-builtin
import unittest
import cleverbot
class CleverbotTest(unittest.TestCase):
def test_replay(self):
cbc = cleverbot.Cleverbot("cleverbot-py-test")
try:
response = cbc.ask("Hi. How are you?")
except cleverbot.CleverbotServiceError:
# Technically, cleverbot errored. But we connected, which is all
# that matters
self.assertTrue(True)
else:
            self.assertNotEqual(response, str())
|
479316
|
from mock import ANY
from raptiformica.actions.mesh import ensure_ipv6_enabled
from tests.testcase import TestCase
class TestEnsureIPv6Enabled(TestCase):
def setUp(self):
self.log = self.set_up_patch('raptiformica.actions.mesh.log')
self.run_command_print_ready = self.set_up_patch(
'raptiformica.actions.mesh.run_command_print_ready'
)
self.log_failure_factory = self.set_up_patch(
'raptiformica.actions.mesh.log_failure_factory'
)
def test_ensure_ipv6_enabled_logs_info_message(self):
ensure_ipv6_enabled()
self.log.info.assert_called_once_with(ANY)
def test_ensure_ipv6_enabled_configures_ipv6_not_disabled_at_kernel_level(self):
ensure_ipv6_enabled()
self.run_command_print_ready.assert_called_once_with(
"/usr/bin/env sysctl net.ipv6.conf.all.disable_ipv6=0",
failure_callback=self.log_failure_factory.return_value,
shell=True,
buffered=False
)
def test_ensure_ipv6_enabled_uses_log_failure_factory(self):
ensure_ipv6_enabled()
self.log_failure_factory.assert_called_once_with(ANY)
|
479343
|
from datasets.MOT.dataset import MultipleObjectTrackingDataset_MemoryMapped, \
MultipleObjectTrackingDatasetSequence_MemoryMapped, MultipleObjectTrackingDatasetFrame_MemoryMapped
from miscellanies.viewer.qt5_viewer import Qt5Viewer
from datasets.base.common.viewer.qt5_viewer import draw_object
from PyQt5.QtGui import QPixmap, QColor
from miscellanies.simple_prefetcher import SimplePrefetcher
import random
__all__ = ['MOTDatasetQt5Viewer']
class _DatasetSequenceImageLoader:
def __init__(self, sequence: MultipleObjectTrackingDatasetSequence_MemoryMapped):
self.sequence = sequence
def __len__(self):
return len(self.sequence)
def __getitem__(self, index: int):
frame = self.sequence.get_frame(index)
pixmap = QPixmap()
assert pixmap.load(frame.get_image_path())
return pixmap, frame
class MOTDatasetQt5Viewer:
def __init__(self, dataset: MultipleObjectTrackingDataset_MemoryMapped):
self.dataset = dataset
self.viewer = Qt5Viewer()
self.canvas = self.viewer.get_subplot().create_canvas()
if dataset.has_category_id_name_map():
self.category_id_color_map = {}
for category_id in self.dataset.get_category_id_name_map().keys():
color = [random.randint(0, 255) for _ in range(3)]
self.category_id_color_map[category_id] = QColor(color[0], color[1], color[2], int(0.5 * 255))
else:
self.category_id_color_map = None
sequence_names = []
for sequence in self.dataset:
sequence_names.append(sequence.get_name())
self.viewer.get_content_region().new_list(sequence_names, self._sequence_selected_callback)
self.timer = self.viewer.new_timer()
self.timer.set_callback(self._timer_timeout_callback)
def _sequence_selected_callback(self, index: int):
if index < 0:
return
self.sequence = SimplePrefetcher(_DatasetSequenceImageLoader(self.dataset[index]))
self._stop_timer()
self._start_timer()
def _start_timer(self):
self.sequence_iter = iter(self.sequence)
self.timer.start()
def _timer_timeout_callback(self):
try:
image, frame = next(self.sequence_iter)
except StopIteration:
self._stop_timer()
return
frame: MultipleObjectTrackingDatasetFrame_MemoryMapped = frame
canvas = self.canvas
canvas.set_background(image)
with canvas.get_painter() as painter:
for object_ in frame:
draw_object(painter, object_, object_, object_, object_, self.category_id_color_map, self.dataset, self.dataset)
canvas.update()
def _stop_timer(self):
self.timer.stop()
def run(self):
return self.viewer.run_event_loop()
|
479394
|
from .base import ObjectBase
from .list import ObjectList
from .order_line import OrderLine
class Shipment(ObjectBase):
@property
def resource(self):
return self._get_property("resource")
@property
def id(self):
return self._get_property("id")
@property
def order_id(self):
return self._get_property("orderId")
@property
def created_at(self):
return self._get_property("createdAt")
@property
def tracking(self):
return self._get_property("tracking")
@property
def tracking_url(self):
return self.tracking["url"] if self.has_tracking_url() else None
@property
def lines(self):
"""Return the order lines of this shipment."""
lines = self._get_property("lines") or []
result = {
"_embedded": {
"lines": lines,
},
"count": len(lines),
}
return ObjectList(result, OrderLine, self.client)
@property
def order(self):
"""Return the order of this shipment."""
return self.client.orders.get(self.order_id)
# additional methods
def has_tracking(self):
return self.tracking is not None
def has_tracking_url(self):
return self.has_tracking() and self.tracking["url"] is not None
|
479408
|
from shovel import task
import subprocess
@task
def rst():
"""Convert markdown readme to reStructuredText"""
subprocess.call(['pandoc', '--from=markdown', '--to=rst', '--output=README', 'README.md'])
|
479417
|
from .base import Render
class RenderOutput(Render):
def __init__(self, string_to_format):
self.string_to_format = string_to_format
def render(self, **kwargs):
        print(self._format(f"{self.string_to_format}{{reset}}", **kwargs))
|
479437
|
from squid import *
import time
rgb = Squid(18, 23, 24)
rgb.set_color(RED)
time.sleep(2)
rgb.set_color(GREEN)
time.sleep(2)
rgb.set_color(BLUE)
time.sleep(2)
rgb.set_color(WHITE)
time.sleep(2)
rgb.set_color(WHITE, 300)
time.sleep(2)
|
479446
|
import os
import time
from transformers.pipelines import pipeline
import pytest
from answer import create_app
# initialize testing environment
hg_comp = pipeline('question-answering',
model="distilbert-base-uncased-distilled-squad",
tokenizer="distilbert-base-uncased-distilled-squad")
@pytest.fixture
def client():
app = create_app(hg_comp)
app.config["TESTING"] = True
with app.test_client() as client:
yield client
# Health check route test
def test_health(client):
r = client.get("/")
assert 200 == r.status_code
# Answer question route test
def test_answer(client):
payload = {
"question": "who did <NAME> play in waterloo rd?",
"context": "She attended the British drama school East 15 in 2005, and left after winning a high-profile role in the BBC drama Waterloo Road, playing the bully Leigh-Ann Galloway.[6] Since that role, Matthews has continued to act in BBC's Doctors, playing <NAME>; in ITV's The Bill playing drug addict <NAME>; and she was back in the BBC soap Doctors in 2009, playing Tansy Flack."
}
r = client.post("/answer", json=payload)
assert 200 == r.status_code
|
479529
|
import numpy as np
SKY_BETA160_C05_D1 = np.array(
[[0.39344519, 0.39292569, 0.39240611, 0.39188644, 0.3913667 ,
0.39084688, 0.39032699, 0.38980703, 0.389287 , 0.38876691,
0.38824676, 0.38772654, 0.38720627, 0.38668595, 0.38616558,
0.38564515, 0.38512468, 0.38460417, 0.38408362, 0.38356304,
0.38304241, 0.38252176, 0.38200108, 0.38148037, 0.38095963,
0.38043888, 0.37991811, 0.37939732, 0.37887652, 0.37835571,
0.37783489, 0.37731407, 0.37679325, 0.37627242, 0.3757516 ,
0.37523079, 0.37470998, 0.37418919, 0.37366841, 0.37314765,
0.37262691, 0.37210619, 0.37158549, 0.37106483, 0.37054419,
0.37002359, 0.36950302, 0.36898249, 0.368462 , 0.36794156,
0.36742116, 0.36690081, 0.36638051, 0.36586027, 0.36534008,
0.36481996, 0.36429989, 0.3637799 , 0.36325996, 0.3627401 ,
0.36222032, 0.3617006 , 0.36118097, 0.36066142, 0.36014195,
0.35962256, 0.35910327, 0.35858406, 0.35806495, 0.35754594,
0.35702703, 0.35650821, 0.35598951, 0.3554709 , 0.35495241,
0.35443403, 0.35391576, 0.35339762, 0.35287959, 0.35236168,
0.3518439 , 0.35132624, 0.35080872, 0.35029133, 0.34977407,
0.34925695, 0.34873997, 0.34822313, 0.34770644, 0.34718989,
0.3466735 , 0.34615726, 0.34564117, 0.34512524, 0.34460947,
0.34409386, 0.34357842, 0.34306315, 0.34254804, 0.34203311],
[0.39344519, 0.39292569, 0.39240611, 0.39188644, 0.3913667 ,
0.39084688, 0.39032699, 0.38980703, 0.389287 , 0.38876691,
0.38824676, 0.38772654, 0.38720627, 0.38668595, 0.38616558,
0.38564515, 0.38512468, 0.38460417, 0.38408362, 0.38356304,
0.38304241, 0.38252176, 0.38200108, 0.38148037, 0.38095963,
0.38043888, 0.37991811, 0.37939732, 0.37887652, 0.37835571,
0.37783489, 0.37731407, 0.37679325, 0.37627242, 0.3757516 ,
0.37523079, 0.37470998, 0.37418919, 0.37366841, 0.37314765,
0.37262691, 0.37210619, 0.37158549, 0.37106483, 0.37054419,
0.37002359, 0.36950302, 0.36898249, 0.368462 , 0.36794156,
0.36742116, 0.36690081, 0.36638051, 0.36586027, 0.36534008,
0.36481996, 0.36429989, 0.3637799 , 0.36325996, 0.3627401 ,
0.36222032, 0.3617006 , 0.36118097, 0.36066142, 0.36014195,
0.35962256, 0.35910327, 0.35858406, 0.35806495, 0.35754594,
0.35702703, 0.35650821, 0.35598951, 0.3554709 , 0.35495241,
0.35443403, 0.35391576, 0.35339762, 0.35287959, 0.35236168,
0.3518439 , 0.35132624, 0.35080872, 0.35029133, 0.34977407,
0.34925695, 0.34873997, 0.34822313, 0.34770644, 0.34718989,
0.3466735 , 0.34615726, 0.34564117, 0.34512524, 0.34460947,
0.34409386, 0.34357842, 0.34306315, 0.34254804, 0.34203311]])
SKY_BETA20_C05_D1 = np.array(
[[0.49191249, 0.47907243, 0.46639084, 0.4539596 , 0.44186595,
0.43019095, 0.41900828, 0.40838328, 0.39837241, 0.38902304,
0.38037348, 0.37245339, 0.36528422, 0.35888002, 0.35324812,
0.34839005, 0.34430235, 0.34097738, 0.3384041 , 0.33656874,
0.33545546, 0.3350468 , 0.3353242 , 0.33626834, 0.33785938,
0.34007726, 0.34290179, 0.34631281, 0.35029021, 0.35460871,
0.35899677, 0.36385087, 0.36914875, 0.374868 , 0.38098599,
0.3874798 , 0.39432619, 0.40150151, 0.4089817 , 0.41674221,
0.42475803, 0.43300368, 0.44145321, 0.45008021, 0.45885791,
0.46775917, 0.4767566 , 0.4858226 , 0.4949295 , 0.50404957,
0.51315521, 0.52221897, 0.53121369, 0.54011256, 0.5488892 ,
0.55751777, 0.56597297, 0.57423011, 0.58226515, 0.59005469,
0.59757597, 0.60480687, 0.61172586, 0.61831194, 0.62454461,
0.63040379, 0.63586971, 0.64092289, 0.645544 , 0.64971378,
0.65341298, 0.65662231, 0.65932232, 0.66149343, 0.66311588,
0.66420495, 0.66492791, 0.66502809, 0.66448647, 0.66328438,
0.6614038 , 0.65882772, 0.65554061, 0.65152894, 0.64678181,
0.64129165, 0.63505501, 0.62807333, 0.62035387, 0.61191049,
0.60276447, 0.59294524, 0.58249086, 0.57144843, 0.55987415,
0.54783307, 0.53539856, 0.52265139, 0.5096785 , 0.49657147],
[0.49191249, 0.47907243, 0.46639084, 0.4539596 , 0.44186595,
0.43019095, 0.41900828, 0.40838328, 0.39837241, 0.38902304,
0.38037348, 0.37245339, 0.36528422, 0.35888002, 0.35324812,
0.34839005, 0.34430235, 0.34097738, 0.3384041 , 0.33656874,
0.33545546, 0.3350468 , 0.3353242 , 0.33626834, 0.33785938,
0.34007726, 0.34290179, 0.34631281, 0.35029021, 0.35460871,
0.35899677, 0.36385087, 0.36914875, 0.374868 , 0.38098599,
0.3874798 , 0.39432619, 0.40150151, 0.4089817 , 0.41674221,
0.42475803, 0.43300368, 0.44145321, 0.45008021, 0.45885791,
0.46775917, 0.4767566 , 0.4858226 , 0.4949295 , 0.50404957,
0.51315521, 0.52221897, 0.53121369, 0.54011256, 0.5488892 ,
0.55751777, 0.56597297, 0.57423011, 0.58226515, 0.59005469,
0.59757597, 0.60480687, 0.61172586, 0.61831194, 0.62454461,
0.63040379, 0.63586971, 0.64092289, 0.645544 , 0.64971378,
0.65341298, 0.65662231, 0.65932232, 0.66149343, 0.66311588,
0.66420495, 0.66492791, 0.66502809, 0.66448647, 0.66328438,
0.6614038 , 0.65882772, 0.65554061, 0.65152894, 0.64678181,
0.64129165, 0.63505501, 0.62807333, 0.62035387, 0.61191049,
0.60276447, 0.59294524, 0.58249086, 0.57144843, 0.55987415,
0.54783307, 0.53539856, 0.52265139, 0.5096785 , 0.49657147]])
SKY_BETA20_C0_D1 = np.array(
[[0.02721499, 0.0283665 , 0.02958612, 0.03087902, 0.03225084,
0.03370777, 0.03525655, 0.03690459, 0.03866 , 0.04053169,
0.04252945, 0.04466404, 0.04694732, 0.04939235, 0.05201353,
0.05482677, 0.05784964, 0.06110152, 0.06460389, 0.06838047,
0.07245748, 0.07686393, 0.08163185, 0.08679662, 0.09239721,
0.0984765 , 0.10508156, 0.11226383, 0.12007933, 0.12858868,
0.13785703, 0.14795376, 0.15895183, 0.17092689, 0.18395573,
0.19811434, 0.21347513, 0.23010344, 0.24805335, 0.26736258,
0.28804702, 0.31009493, 0.3334614 , 0.35806381, 0.38377894,
0.41044252, 0.4378518 , 0.46577152, 0.49394289, 0.52209507,
0.54995789, 0.57727461, 0.60381305, 0.62937442, 0.65379876,
0.67696726, 0.6988015 , 0.71926031, 0.73833509, 0.75604423,
0.77242734, 0.78753973, 0.80144751, 0.81422338, 0.82594327,
0.83668366, 0.84651965, 0.85552351, 0.86376381, 0.87130483,
0.87820626, 0.88452317, 0.89030601, 0.89560085, 0.90044957,
0.90489014, 0.90895687, 0.91268077, 0.91608977, 0.91920901,
0.92206108, 0.92466627, 0.92704278, 0.9292069 , 0.93117321,
0.93295473, 0.93456304, 0.93600842, 0.9373 , 0.93844578,
0.93945279, 0.94032712, 0.941074 , 0.94169784, 0.94220232,
0.94259038, 0.94286425, 0.94302554, 0.94307515, 0.94301338],
[0.02721499, 0.0283665 , 0.02958612, 0.03087902, 0.03225084,
0.03370777, 0.03525655, 0.03690459, 0.03866 , 0.04053169,
0.04252945, 0.04466404, 0.04694732, 0.04939235, 0.05201353,
0.05482677, 0.05784964, 0.06110152, 0.06460389, 0.06838047,
0.07245748, 0.07686393, 0.08163185, 0.08679662, 0.09239721,
0.0984765 , 0.10508156, 0.11226383, 0.12007933, 0.12858868,
0.13785703, 0.14795376, 0.15895183, 0.17092689, 0.18395573,
0.19811434, 0.21347513, 0.23010344, 0.24805335, 0.26736258,
0.28804702, 0.31009493, 0.3334614 , 0.35806381, 0.38377894,
0.41044252, 0.4378518 , 0.46577152, 0.49394289, 0.52209507,
0.54995789, 0.57727461, 0.60381305, 0.62937442, 0.65379876,
0.67696726, 0.6988015 , 0.71926031, 0.73833509, 0.75604423,
0.77242734, 0.78753973, 0.80144751, 0.81422338, 0.82594327,
0.83668366, 0.84651965, 0.85552351, 0.86376381, 0.87130483,
0.87820626, 0.88452317, 0.89030601, 0.89560085, 0.90044957,
0.90489014, 0.90895687, 0.91268077, 0.91608977, 0.91920901,
0.92206108, 0.92466627, 0.92704278, 0.9292069 , 0.93117321,
0.93295473, 0.93456304, 0.93600842, 0.9373 , 0.93844578,
0.93945279, 0.94032712, 0.941074 , 0.94169784, 0.94220232,
0.94259038, 0.94286425, 0.94302554, 0.94307515, 0.94301338]])
SKY_BETA160_C0_D1 = np.array(
[[0.00384724, 0.00384054, 0.00383385, 0.00382718, 0.00382052,
0.00381388, 0.00380726, 0.00380064, 0.00379405, 0.00378746,
0.0037809 , 0.00377434, 0.0037678 , 0.00376128, 0.00375477,
0.00374828, 0.0037418 , 0.00373533, 0.00372888, 0.00372244,
0.00371602, 0.00370961, 0.00370321, 0.00369683, 0.00369047,
0.00368412, 0.00367778, 0.00367145, 0.00366514, 0.00365885,
0.00365256, 0.0036463 , 0.00364004, 0.0036338 , 0.00362757,
0.00362136, 0.00361516, 0.00360898, 0.0036028 , 0.00359665,
0.0035905 , 0.00358437, 0.00357825, 0.00357215, 0.00356606,
0.00355998, 0.00355391, 0.00354786, 0.00354183, 0.0035358 ,
0.00352979, 0.00352379, 0.00351781, 0.00351183, 0.00350588,
0.00349993, 0.003494 , 0.00348808, 0.00348217, 0.00347628,
0.0034704 , 0.00346453, 0.00345867, 0.00345283, 0.003447 ,
0.00344118, 0.00343538, 0.00342958, 0.0034238 , 0.00341804,
0.00341228, 0.00340654, 0.00340081, 0.0033951 , 0.00338939,
0.0033837 , 0.00337802, 0.00337235, 0.0033667 , 0.00336105,
0.00335542, 0.0033498 , 0.0033442 , 0.0033386 , 0.00333302,
0.00332745, 0.00332189, 0.00331634, 0.00331081, 0.00330529,
0.00329978, 0.00329428, 0.00328879, 0.00328332, 0.00327785,
0.0032724 , 0.00326696, 0.00326153, 0.00325612, 0.00325071],
[0.00384724, 0.00384054, 0.00383385, 0.00382718, 0.00382052,
0.00381388, 0.00380726, 0.00380064, 0.00379405, 0.00378746,
0.0037809 , 0.00377434, 0.0037678 , 0.00376128, 0.00375477,
0.00374828, 0.0037418 , 0.00373533, 0.00372888, 0.00372244,
0.00371602, 0.00370961, 0.00370321, 0.00369683, 0.00369047,
0.00368412, 0.00367778, 0.00367145, 0.00366514, 0.00365885,
0.00365256, 0.0036463 , 0.00364004, 0.0036338 , 0.00362757,
0.00362136, 0.00361516, 0.00360898, 0.0036028 , 0.00359665,
0.0035905 , 0.00358437, 0.00357825, 0.00357215, 0.00356606,
0.00355998, 0.00355391, 0.00354786, 0.00354183, 0.0035358 ,
0.00352979, 0.00352379, 0.00351781, 0.00351183, 0.00350588,
0.00349993, 0.003494 , 0.00348808, 0.00348217, 0.00347628,
0.0034704 , 0.00346453, 0.00345867, 0.00345283, 0.003447 ,
0.00344118, 0.00343538, 0.00342958, 0.0034238 , 0.00341804,
0.00341228, 0.00340654, 0.00340081, 0.0033951 , 0.00338939,
0.0033837 , 0.00337802, 0.00337235, 0.0033667 , 0.00336105,
0.00335542, 0.0033498 , 0.0033442 , 0.0033386 , 0.00333302,
0.00332745, 0.00332189, 0.00331634, 0.00331081, 0.00330529,
0.00329978, 0.00329428, 0.00328879, 0.00328332, 0.00327785,
0.0032724 , 0.00326696, 0.00326153, 0.00325612, 0.00325071]])
SKY_BETA160_C1_D1 = np.array(
[[0.3114026 , 0.31120886, 0.31101505, 0.31082117, 0.31062722,
0.3104332 , 0.3102391 , 0.31004494, 0.30985071, 0.30965641,
0.30946205, 0.30926761, 0.30907311, 0.30887854, 0.3086839 ,
0.3084892 , 0.30829443, 0.30809959, 0.30790469, 0.30770973,
0.3075147 , 0.3073196 , 0.30712444, 0.30692922, 0.30673394,
0.30653859, 0.30634318, 0.30614771, 0.30595217, 0.30575658,
0.30556092, 0.30536521, 0.30516943, 0.30497359, 0.3047777 ,
0.30458174, 0.30438573, 0.30418966, 0.30399353, 0.30379734,
0.30360109, 0.30340479, 0.30320844, 0.30301202, 0.30281555,
0.30261903, 0.30242244, 0.30222581, 0.30202912, 0.30183238,
0.30163558, 0.30143873, 0.30124183, 0.30104487, 0.30084786,
0.3006508 , 0.30045369, 0.30025653, 0.30005932, 0.29986206,
0.29966474, 0.29946738, 0.29926997, 0.29907251, 0.298875 ,
0.29867745, 0.29847984, 0.29828219, 0.29808449, 0.29788675,
0.29768895, 0.29749112, 0.29729323, 0.29709531, 0.29689733,
0.29669932, 0.29650126, 0.29630315, 0.296105 , 0.29590681,
0.29570858, 0.2955103 , 0.29531198, 0.29511362, 0.29491522,
0.29471678, 0.2945183 , 0.29431978, 0.29412122, 0.29392262,
0.29372398, 0.2935253 , 0.29332658, 0.29312783, 0.29292903,
0.29273021, 0.29253134, 0.29233244, 0.2921335 , 0.29193452],
[0.3114026 , 0.31120886, 0.31101505, 0.31082117, 0.31062722,
0.3104332 , 0.3102391 , 0.31004494, 0.30985071, 0.30965641,
0.30946205, 0.30926761, 0.30907311, 0.30887854, 0.3086839 ,
0.3084892 , 0.30829443, 0.30809959, 0.30790469, 0.30770973,
0.3075147 , 0.3073196 , 0.30712444, 0.30692922, 0.30673394,
0.30653859, 0.30634318, 0.30614771, 0.30595217, 0.30575658,
0.30556092, 0.30536521, 0.30516943, 0.30497359, 0.3047777 ,
0.30458174, 0.30438573, 0.30418966, 0.30399353, 0.30379734,
0.30360109, 0.30340479, 0.30320844, 0.30301202, 0.30281555,
0.30261903, 0.30242244, 0.30222581, 0.30202912, 0.30183238,
0.30163558, 0.30143873, 0.30124183, 0.30104487, 0.30084786,
0.3006508 , 0.30045369, 0.30025653, 0.30005932, 0.29986206,
0.29966474, 0.29946738, 0.29926997, 0.29907251, 0.298875 ,
0.29867745, 0.29847984, 0.29828219, 0.29808449, 0.29788675,
0.29768895, 0.29749112, 0.29729323, 0.29709531, 0.29689733,
0.29669932, 0.29650126, 0.29630315, 0.296105 , 0.29590681,
0.29570858, 0.2955103 , 0.29531198, 0.29511362, 0.29491522,
0.29471678, 0.2945183 , 0.29431978, 0.29412122, 0.29392262,
0.29372398, 0.2935253 , 0.29332658, 0.29312783, 0.29292903,
0.29273021, 0.29253134, 0.29233244, 0.2921335 , 0.29193452]])
SKY_BETA20_C1_D1 = np.array(
[[0.48359335, 0.48106107, 0.47853666, 0.47603262, 0.47356126,
0.4711346 , 0.46876431, 0.46646169, 0.46423755, 0.46210222,
0.46006544, 0.45813638, 0.45632354, 0.45463477, 0.45307722,
0.45165731, 0.45038074, 0.44925246, 0.44827667, 0.44745684,
0.44679565, 0.44629509, 0.4459564 , 0.44578011, 0.44576606,
0.44591339, 0.44622062, 0.44668562, 0.44730565, 0.4480774 ,
0.44899698, 0.45006001, 0.45126157, 0.4525963 , 0.45405837,
0.45564154, 0.45733918, 0.45914431, 0.4610496 , 0.46304743,
0.46512988, 0.4672888 , 0.46951581, 0.47180231, 0.47413957,
0.47651868, 0.47893064, 0.48136632, 0.48381656, 0.48627214,
0.48872384, 0.49116241, 0.49357868, 0.49596352, 0.49830788,
0.50060282, 0.50283954, 0.50500939, 0.50708952, 0.50899944,
0.51081437, 0.51252644, 0.51412804, 0.51561185, 0.51697089,
0.51819852, 0.51928846, 0.52023486, 0.52103225, 0.52167564,
0.5221605 , 0.52248279, 0.522639 , 0.52262617, 0.52244188,
0.52208434, 0.52155235, 0.52084535, 0.51996342, 0.51890734,
0.51767854, 0.51627917, 0.51471209, 0.51298085, 0.51108975,
0.50904377, 0.50684862, 0.50451069, 0.50203706, 0.49943547,
0.49671429, 0.49388246, 0.49094953, 0.48792553, 0.48482096,
0.48164675, 0.47841417, 0.47513479, 0.47182042, 0.468483 ],
[0.48359335, 0.48106107, 0.47853666, 0.47603262, 0.47356126,
0.4711346 , 0.46876431, 0.46646169, 0.46423755, 0.46210222,
0.46006544, 0.45813638, 0.45632354, 0.45463477, 0.45307722,
0.45165731, 0.45038074, 0.44925246, 0.44827667, 0.44745684,
0.44679565, 0.44629509, 0.4459564 , 0.44578011, 0.44576606,
0.44591339, 0.44622062, 0.44668562, 0.44730565, 0.4480774 ,
0.44899698, 0.45006001, 0.45126157, 0.4525963 , 0.45405837,
0.45564154, 0.45733918, 0.45914431, 0.4610496 , 0.46304743,
0.46512988, 0.4672888 , 0.46951581, 0.47180231, 0.47413957,
0.47651868, 0.47893064, 0.48136632, 0.48381656, 0.48627214,
0.48872384, 0.49116241, 0.49357868, 0.49596352, 0.49830788,
0.50060282, 0.50283954, 0.50500939, 0.50708952, 0.50899944,
0.51081437, 0.51252644, 0.51412804, 0.51561185, 0.51697089,
0.51819852, 0.51928846, 0.52023486, 0.52103225, 0.52167564,
0.5221605 , 0.52248279, 0.522639 , 0.52262617, 0.52244188,
0.52208434, 0.52155235, 0.52084535, 0.51996342, 0.51890734,
0.51767854, 0.51627917, 0.51471209, 0.51298085, 0.51108975,
0.50904377, 0.50684862, 0.50451069, 0.50203706, 0.49943547,
0.49671429, 0.49388246, 0.49094953, 0.48792553, 0.48482096,
0.48164675, 0.47841417, 0.47513479, 0.47182042, 0.468483 ]])
SKY_BETA20_C1_D0 = np.array(
[[0.09025658, 0.09055715, 0.09085192, 0.09114066, 0.09142313,
0.09169909, 0.09196829, 0.09223048, 0.09248542, 0.09273283,
0.09297247, 0.09320406, 0.09342733, 0.09364202, 0.09384785,
0.09404453, 0.0942318 , 0.09440935, 0.09457692, 0.09473421,
0.09488092, 0.09501677, 0.09514146, 0.0952547 , 0.09535618,
0.09544562, 0.09552272, 0.09558717, 0.09563868, 0.09567695,
0.09570169, 0.09571261, 0.09570941, 0.0956918 , 0.09565949,
0.09561221, 0.09554967, 0.09547159, 0.09537771, 0.09526775,
0.09514146, 0.09499859, 0.09483889, 0.09466211, 0.09446802,
0.09425641, 0.09402706, 0.09377976, 0.09351431, 0.09323055,
0.09292828, 0.09260736, 0.09226763, 0.09190897, 0.09153124,
0.09113435, 0.0907182 , 0.09028272, 0.08982784, 0.08935352,
0.08885973, 0.08834646, 0.08781372, 0.08726153, 0.08668994,
0.086099 , 0.0854888 , 0.08485944, 0.08421102, 0.0835437 ,
0.08285763, 0.08215298, 0.08142994, 0.08068875, 0.07992961,
0.0791528 , 0.07835859, 0.07754725, 0.07671911, 0.07587448,
0.07501371, 0.07413717, 0.07324522, 0.07233826, 0.07141669,
0.07048094, 0.06953143, 0.06856862, 0.06759297, 0.06660493,
0.065605 , 0.06459365, 0.06357138, 0.06253868, 0.06149607,
0.06044405, 0.05938314, 0.05831385, 0.05723669, 0.05615218],
[0.09025658, 0.09055715, 0.09085192, 0.09114066, 0.09142313,
0.09169909, 0.09196829, 0.09223048, 0.09248542, 0.09273283,
0.09297247, 0.09320406, 0.09342733, 0.09364202, 0.09384785,
0.09404453, 0.0942318 , 0.09440935, 0.09457692, 0.09473421,
0.09488092, 0.09501677, 0.09514146, 0.0952547 , 0.09535618,
0.09544562, 0.09552272, 0.09558717, 0.09563868, 0.09567695,
0.09570169, 0.09571261, 0.09570941, 0.0956918 , 0.09565949,
0.09561221, 0.09554967, 0.09547159, 0.09537771, 0.09526775,
0.09514146, 0.09499859, 0.09483889, 0.09466211, 0.09446802,
0.09425641, 0.09402706, 0.09377976, 0.09351431, 0.09323055,
0.09292828, 0.09260736, 0.09226763, 0.09190897, 0.09153124,
0.09113435, 0.0907182 , 0.09028272, 0.08982784, 0.08935352,
0.08885973, 0.08834646, 0.08781372, 0.08726153, 0.08668994,
0.086099 , 0.0854888 , 0.08485944, 0.08421102, 0.0835437 ,
0.08285763, 0.08215298, 0.08142994, 0.08068875, 0.07992961,
0.0791528 , 0.07835859, 0.07754725, 0.07671911, 0.07587448,
0.07501371, 0.07413717, 0.07324522, 0.07233826, 0.07141669,
0.07048094, 0.06953143, 0.06856862, 0.06759297, 0.06660493,
0.065605 , 0.06459365, 0.06357138, 0.06253868, 0.06149607,
0.06044405, 0.05938314, 0.05831385, 0.05723669, 0.05615218]])
SKY_BETA160_C1_D0 = np.array(
[[0.0005987 , 0.00179573, 0.00299166, 0.00418578, 0.00537735,
0.00656566, 0.00774999, 0.00892963, 0.0101039 , 0.01127208,
0.01243352, 0.01358753, 0.01473347, 0.0158707 , 0.01699857,
0.01811649, 0.01922386, 0.02032009, 0.02140464, 0.02247695,
0.0235365 , 0.02458279, 0.02561534, 0.02663368, 0.02763738,
0.02862601, 0.02959919, 0.03055652, 0.03149767, 0.0324223 ,
0.0333301 , 0.03422078, 0.0350941 , 0.0359498 , 0.03678767,
0.0376075 , 0.03840914, 0.03919242, 0.03995722, 0.04070342,
0.04143094, 0.0421397 , 0.04282965, 0.04350077, 0.04415304,
0.04478647, 0.04540109, 0.04599692, 0.04657403, 0.0471325 ,
0.0476724 , 0.04819385, 0.04869696, 0.04918185, 0.04964868,
0.05009759, 0.05052876, 0.05094236, 0.05133857, 0.0517176 ,
0.05207964, 0.05242492, 0.05275366, 0.05306608, 0.05336243,
0.05364293, 0.05390785, 0.05415743, 0.05439194, 0.05461162,
0.05481675, 0.0550076 , 0.05518443, 0.05534752, 0.05549714,
0.05563356, 0.05575708, 0.05586795, 0.05596646, 0.05605289,
0.05612752, 0.05619061, 0.05624245, 0.05628331, 0.05631347,
0.05633319, 0.05634274, 0.05634239, 0.05633241, 0.05631306,
0.05628459, 0.05624726, 0.05620133, 0.05614705, 0.05608466,
0.05601441, 0.05593653, 0.05585127, 0.05575886, 0.05565952],
[0.0005987 , 0.00179573, 0.00299166, 0.00418578, 0.00537735,
0.00656566, 0.00774999, 0.00892963, 0.0101039 , 0.01127208,
0.01243352, 0.01358753, 0.01473347, 0.0158707 , 0.01699857,
0.01811649, 0.01922386, 0.02032009, 0.02140464, 0.02247695,
0.0235365 , 0.02458279, 0.02561534, 0.02663368, 0.02763738,
0.02862601, 0.02959919, 0.03055652, 0.03149767, 0.0324223 ,
0.0333301 , 0.03422078, 0.0350941 , 0.0359498 , 0.03678767,
0.0376075 , 0.03840914, 0.03919242, 0.03995722, 0.04070342,
0.04143094, 0.0421397 , 0.04282965, 0.04350077, 0.04415304,
0.04478647, 0.04540109, 0.04599692, 0.04657403, 0.0471325 ,
0.0476724 , 0.04819385, 0.04869696, 0.04918185, 0.04964868,
0.05009759, 0.05052876, 0.05094236, 0.05133857, 0.0517176 ,
0.05207964, 0.05242492, 0.05275366, 0.05306608, 0.05336243,
0.05364293, 0.05390785, 0.05415743, 0.05439194, 0.05461162,
0.05481675, 0.0550076 , 0.05518443, 0.05534752, 0.05549714,
0.05563356, 0.05575708, 0.05586795, 0.05596646, 0.05605289,
0.05612752, 0.05619061, 0.05624245, 0.05628331, 0.05631347,
0.05633319, 0.05634274, 0.05634239, 0.05633241, 0.05631306,
0.05628459, 0.05624726, 0.05620133, 0.05614705, 0.05608466,
0.05601441, 0.05593653, 0.05585127, 0.05575886, 0.05565952]])
SKY_BETA160_C05_D0 = np.array(
[[0.0019083 , 0.00572098, 0.00952188, 0.01330324, 0.0170574 ,
0.02077689, 0.02445443, 0.028083 , 0.03165587, 0.03516663,
0.03860922, 0.04197798, 0.04526763, 0.04847333, 0.05159066,
0.05461567, 0.05754483, 0.06037508, 0.0631038 , 0.06572882,
0.06824839, 0.0706612 , 0.07296634, 0.07516327, 0.07725184,
0.07923225, 0.08110501, 0.08287096, 0.08453121, 0.08608712,
0.08754031, 0.08889258, 0.09014596, 0.09130261, 0.09236487,
0.09333518, 0.09421611, 0.0950103 , 0.09572047, 0.0963494 ,
0.0968999 , 0.0973748 , 0.09777697, 0.09810924, 0.09837448,
0.09857549, 0.09871507, 0.09879598, 0.09882094, 0.09879261,
0.09871361, 0.09858648, 0.09841371, 0.09819775, 0.09794093,
0.09764555, 0.09731382, 0.09694789, 0.09654982, 0.09612162,
0.09566521, 0.09518243, 0.09467507, 0.09414484, 0.09359337,
0.09302222, 0.09243291, 0.09182686, 0.09120544, 0.09056996,
0.08992166, 0.08926174, 0.08859131, 0.08791146, 0.08722319,
0.08652749, 0.08582525, 0.08511736, 0.08440462, 0.08368783,
0.08296772, 0.08224497, 0.08152024, 0.08079415, 0.08006728,
0.07934017, 0.07861333, 0.07788726, 0.07716238, 0.07643914,
0.07571792, 0.0749991 , 0.07428301, 0.07356998, 0.0728603 ,
0.07215426, 0.07145211, 0.07075408, 0.0700604 , 0.06937127],
[0.0019083 , 0.00572098, 0.00952188, 0.01330324, 0.0170574 ,
0.02077689, 0.02445443, 0.028083 , 0.03165587, 0.03516663,
0.03860922, 0.04197798, 0.04526763, 0.04847333, 0.05159066,
0.05461567, 0.05754483, 0.06037508, 0.0631038 , 0.06572882,
0.06824839, 0.0706612 , 0.07296634, 0.07516327, 0.07725184,
0.07923225, 0.08110501, 0.08287096, 0.08453121, 0.08608712,
0.08754031, 0.08889258, 0.09014596, 0.09130261, 0.09236487,
0.09333518, 0.09421611, 0.0950103 , 0.09572047, 0.0963494 ,
0.0968999 , 0.0973748 , 0.09777697, 0.09810924, 0.09837448,
0.09857549, 0.09871507, 0.09879598, 0.09882094, 0.09879261,
0.09871361, 0.09858648, 0.09841371, 0.09819775, 0.09794093,
0.09764555, 0.09731382, 0.09694789, 0.09654982, 0.09612162,
0.09566521, 0.09518243, 0.09467507, 0.09414484, 0.09359337,
0.09302222, 0.09243291, 0.09182686, 0.09120544, 0.09056996,
0.08992166, 0.08926174, 0.08859131, 0.08791146, 0.08722319,
0.08652749, 0.08582525, 0.08511736, 0.08440462, 0.08368783,
0.08296772, 0.08224497, 0.08152024, 0.08079415, 0.08006728,
0.07934017, 0.07861333, 0.07788726, 0.07716238, 0.07643914,
0.07571792, 0.0749991 , 0.07428301, 0.07356998, 0.0728603 ,
0.07215426, 0.07145211, 0.07075408, 0.0700604 , 0.06937127]])
SKY_BETA20_C05_D0 = np.array(
[[0.09228642, 0.09322975, 0.09418129, 0.09514089, 0.09610841,
0.09708366, 0.09806647, 0.09905662, 0.10005388, 0.10105801,
0.10206874, 0.10308576, 0.10410876, 0.1051374 , 0.1061713 ,
0.10721007, 0.10825326, 0.10930043, 0.11035108, 0.11140467,
0.11246065, 0.11351841, 0.11457731, 0.11563666, 0.11669575,
0.1177538 , 0.11880999, 0.11986347, 0.12091331, 0.12195855,
0.12299818, 0.12403112, 0.12505623, 0.12607232, 0.12707815,
0.1280724 , 0.1290537 , 0.13002059, 0.13097158, 0.13190509,
0.13281946, 0.133713 , 0.13458391, 0.13543033, 0.13625035,
0.13704196, 0.13780311, 0.13853166, 0.1392254 , 0.13988208,
0.14049937, 0.14107488, 0.14160618, 0.14209076, 0.14241963,
0.14264508, 0.14281147, 0.14291603, 0.14295597, 0.14292848,
0.14283078, 0.14266007, 0.1424136 , 0.14208863, 0.14168249,
0.1411926 , 0.14061643, 0.13995158, 0.13919576, 0.13834685,
0.13740287, 0.13636205, 0.1352228 , 0.1339838 , 0.13264396,
0.1312025 , 0.1296589 , 0.12801301, 0.12626498, 0.12441538,
0.12246511, 0.12041551, 0.11826831, 0.1160257 , 0.11369029,
0.11126511, 0.10875367, 0.10615991, 0.10348821, 0.10074338,
0.09793063, 0.09505559, 0.09212424, 0.08914292, 0.08611827,
0.08305723, 0.07996696, 0.07685483, 0.07372836, 0.07059518],
[0.09228642, 0.09322975, 0.09418129, 0.09514089, 0.09610841,
0.09708366, 0.09806647, 0.09905662, 0.10005388, 0.10105801,
0.10206874, 0.10308576, 0.10410876, 0.1051374 , 0.1061713 ,
0.10721007, 0.10825326, 0.10930043, 0.11035108, 0.11140467,
0.11246065, 0.11351841, 0.11457731, 0.11563666, 0.11669575,
0.1177538 , 0.11880999, 0.11986347, 0.12091331, 0.12195855,
0.12299818, 0.12403112, 0.12505623, 0.12607232, 0.12707815,
0.1280724 , 0.1290537 , 0.13002059, 0.13097158, 0.13190509,
0.13281946, 0.133713 , 0.13458391, 0.13543033, 0.13625035,
0.13704196, 0.13780311, 0.13853166, 0.1392254 , 0.13988208,
0.14049937, 0.14107488, 0.14160618, 0.14209076, 0.14241963,
0.14264508, 0.14281147, 0.14291603, 0.14295597, 0.14292848,
0.14283078, 0.14266007, 0.1424136 , 0.14208863, 0.14168249,
0.1411926 , 0.14061643, 0.13995158, 0.13919576, 0.13834685,
0.13740287, 0.13636205, 0.1352228 , 0.1339838 , 0.13264396,
0.1312025 , 0.1296589 , 0.12801301, 0.12626498, 0.12441538,
0.12246511, 0.12041551, 0.11826831, 0.1160257 , 0.11369029,
0.11126511, 0.10875367, 0.10615991, 0.10348821, 0.10074338,
0.09793063, 0.09505559, 0.09212424, 0.08914292, 0.08611827,
0.08305723, 0.07996696, 0.07685483, 0.07372836, 0.07059518]])
|
479564
|
import json
import os
import requests
import dotenv
# This zone ID may change if/when our account changes
# Run `list_cloudflare_zones` (below) to get a full list
ZONE_ID = "198bb61a3679d0e1545e838a8f0c25b9"
def list_cloudflare_zones():
url = "https://api.cloudflare.com/client/v4/zones"
headers = {
"Content-Type": "application/json",
"X-Auth-Key": os.environ["CF_API_KEY"],
"X-Auth-Email": os.environ["CF_API_EMAIL"],
}
result = json.loads(requests.get(url, headers=headers).text)
zones = [{"name": x["name"], "id": x["id"]} for x in result["result"]]
print(zones)
def clear_cloudflare():
url = "https://api.cloudflare.com/client/v4/zones/%s"
headers = {
"Content-Type": "application/json",
"X-Auth-Key": os.environ["CF_API_KEY"],
"X-Auth-Email": os.environ["CF_API_EMAIL"],
}
data = {"purge_everything": True}
result = json.loads(
requests.delete(
url % ZONE_ID + "/purge_cache", headers=headers, data=json.dumps(data)
).text
)
if result["success"]:
print("Cloudflare clearing succeeded")
else:
raise ValueError(
"Cloudflare clearing failed: %s" % json.dumps(result, indent=2)
)
if __name__ == "__main__":
env_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "..", "environment"
)
dotenv.read_dotenv(env_path, override=True)
clear_cloudflare()
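# Note: the `environment` file loaded above is assumed (it is not shown here) to
# be a dotenv-style file that provides the two variables read by the API calls:
#   CF_API_KEY=<your Cloudflare API key>
#   CF_API_EMAIL=<your Cloudflare account email>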
|
479578
|
import unittest
from nlgeval.pycocoevalcap.meteor.meteor import Meteor
class TestMeteor(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.m = Meteor()
def test_compute_score(self):
s = self.m.compute_score({0: ["test"]}, {0: ["test"]})
self.assertEqual(s, (1.0, [1.0]))
s = self.m.compute_score({0: ["テスト"]}, {0: ["テスト"]})
self.assertEqual(s, (1.0, [1.0]))
|
479613
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import Model as M
import hrnet
class HRNET(M.Model):
def initialize(self, num_pts):
self.backbone = hrnet.ResNet()
self.lastconv = M.ConvLayer(1, num_pts)
def forward(self, x):
x = self.backbone(x)
x = self.lastconv(x)
return x
net = HRNET(17)
net.eval()
net.record()
print(net.lastconv.record)
dummy_inp = np.ones([1,3,256,256])
dummy_inp = np.float32(dummy_inp)
dummy_inp = torch.from_numpy(dummy_inp)
y = net(dummy_inp)
# print(y.shape)
import Layers as L
vs = L.record_params
# print(len(L.record_params))
# print(vs[0].keys())
import pickle
data = pickle.load(open('hrnet_variables.pkl' , 'rb'))
for vsrc, vtgt in zip(data, vs):
print(vsrc.keys())
print(vtgt.keys())
print('------')
for k in vsrc.keys():
        if 'kernel' in k:
            # transpose HWIO-style kernels (H, W, in, out) to PyTorch's OIHW (out, in, H, W)
            v = torch.from_numpy(np.transpose(vsrc[k], [3,2,0,1]))
vtgt['conv.weight'].data[:] = v
else:
v = torch.from_numpy(vsrc[k])
if 'bias' in k:
vtgt['conv.bias'].data[:] = v
if 'gamma' in k:
vtgt['bn.weight'].data[:] = v
if 'beta' in k:
vtgt['bn.bias'].data[:] = v
if 'moving_average' in k:
vtgt['bn.running_mean'].data[:] = v
if 'variance' in k:
vtgt['bn.running_var'].data[:] = v
y = net(dummy_inp)
print(y)
print(y.shape)
M.Saver(net).save('./modeltorch/hrnet.pth')
|
479620
|
from namespace_class import *
try:
p = Private1()
error = 1
except:
error = 0
if (error):
    raise RuntimeError("Private1 is private")
try:
p = Private2()
error = 1
except:
error = 0
if (error):
    raise RuntimeError("Private2 is private")
EulerT3D.toFrame(1, 1, 1)
b = BooT_i()
b = BooT_H()
f = FooT_i()
f.quack(1)
f = FooT_d()
f.moo(1)
f = FooT_H()
f.foo(Hi)
f_type = str(type(f))
if f_type.find("'namespace_class.FooT_H'") == -1:
raise RuntimeError("Incorrect type: " + f_type)
|
479623
|
import os
import shutil
import unittest
import bilby
from bilby.bilby_mcmc.sampler import Bilby_MCMC, BilbyMCMCSampler, _initialize_global_variables
from bilby.bilby_mcmc.utils import ConvergenceInputs
from bilby.core.sampler.base_sampler import SamplerError
import numpy as np
import pandas as pd
class TestBilbyMCMCSampler(unittest.TestCase):
def setUp(self):
default_kwargs = Bilby_MCMC.default_kwargs
default_kwargs["target_nsamples"] = 100
default_kwargs["L1steps"] = 1
self.convergence_inputs = ConvergenceInputs(
**{key: default_kwargs[key] for key in ConvergenceInputs._fields}
)
self.outdir = "bilby_mcmc_sampler_test"
if os.path.isdir(self.outdir) is False:
os.mkdir(self.outdir)
def model(time, m, c):
return time * m + c
injection_parameters = dict(m=0.5, c=0.2)
sampling_frequency = 10
time_duration = 10
time = np.arange(0, time_duration, 1 / sampling_frequency)
N = len(time)
sigma = np.random.normal(1, 0.01, N)
data = model(time, **injection_parameters) + np.random.normal(0, sigma, N)
likelihood = bilby.likelihood.GaussianLikelihood(time, data, model, sigma)
# From hereon, the syntax is exactly equivalent to other bilby examples
# We make a prior
priors = dict()
priors['m'] = bilby.core.prior.Uniform(0, 5, 'm')
priors['c'] = bilby.core.prior.Uniform(-2, 2, 'c')
priors = bilby.core.prior.PriorDict(priors)
search_parameter_keys = ['m', 'c']
use_ratio = False
_initialize_global_variables(likelihood, priors, search_parameter_keys, use_ratio)
def tearDown(self):
if os.path.isdir(self.outdir):
shutil.rmtree(self.outdir)
def test_None_proposal_cycle(self):
with self.assertRaises(SamplerError):
BilbyMCMCSampler(
convergence_inputs=self.convergence_inputs,
proposal_cycle=None,
beta=1,
Tindex=0,
Eindex=0,
use_ratio=False
)
def test_default_proposal_cycle(self):
sampler = BilbyMCMCSampler(
convergence_inputs=self.convergence_inputs,
proposal_cycle="default_noNFnoGMnoKD",
beta=1,
Tindex=0,
Eindex=0,
use_ratio=False
)
nsteps = 0
while sampler.nsamples < 500:
sampler.step()
nsteps += 1
self.assertEqual(sampler.chain.position, nsteps)
self.assertEqual(sampler.accepted + sampler.rejected, nsteps)
self.assertTrue(isinstance(sampler.samples, pd.DataFrame))
if __name__ == "__main__":
unittest.main()
|
479626
|
from pythonwarrior.abilities.base import AbilityBase
class Pivot(AbilityBase):
ROTATION_DIRECTIONS = ['forward', 'right', 'backward', 'left']
def description(self):
return "Rotate 'left', 'right', or 'backward' (default)"
def perform(self, direction='backward'):
self.verify_direction(direction)
self._unit.position.rotate(self.ROTATION_DIRECTIONS.index(direction))
self._unit.say("pivots %s" % direction)
|
479628
|
import os
import argparse
import molbart.util as util
from molbart.models.pre_train import BARTModel, UnifiedModel
from molbart.decoder import DecodeSampler
# Default training hyperparameters
DEFAULT_BATCH_SIZE = 128
DEFAULT_ACC_BATCHES = 1
DEFAULT_MASK_PROB = 0.10
DEFAULT_MASK_SCHEME = "span"
DEFAULT_LR = 1.0
DEFAULT_WEIGHT_DECAY = 0.0
DEFAULT_EPOCHS = 10
DEFAULT_GRAD_CLIP = 1.0
DEFAULT_TRAIN_TOKENS = None
DEFAULT_NUM_BUCKETS = 12
DEFAULT_LIMIT_VAL_BATCHES = 1.0
DEFAULT_SCHEDULE = "transformer"
DEFAULT_WARM_UP_STEPS = 8000
DEFAULT_TASK = "mask_aug"
DEFAULT_AUGMENT = True
def build_model(args, sampler, vocab_size, total_steps, pad_token_idx):
# These args don't affect the model directly but will be saved by lightning as hparams
# Tensorboard doesn't like None so we need to convert to string
augment = "None" if args.augment is None else args.augment
train_tokens = "None" if args.train_tokens is None else args.train_tokens
num_buckets = "None" if args.num_buckets is None else args.num_buckets
extra_args = {
"batch_size": args.batch_size,
"acc_batches": args.acc_batches,
"mask_prob": args.mask_prob,
"epochs": args.epochs,
"clip_grad": args.clip_grad,
"train_tokens": train_tokens,
"num_buckets": num_buckets,
"limit_val_batches": args.limit_val_batches,
"augment": augment,
"task": args.task,
"mask_scheme": args.mask_scheme,
"model_type": args.model_type
}
if args.model_type == "bart":
model = BARTModel(
sampler,
pad_token_idx,
vocab_size,
args.d_model,
args.num_layers,
args.num_heads,
args.d_feedforward,
args.lr,
args.weight_decay,
args.activation,
total_steps,
args.max_seq_len,
schedule=args.schedule,
warm_up_steps=args.warm_up_steps,
dropout=util.DEFAULT_DROPOUT,
**extra_args
)
elif args.model_type == "unified":
model = UnifiedModel(
sampler,
pad_token_idx,
vocab_size,
args.d_model,
args.num_layers,
args.num_heads,
args.d_feedforward,
args.lr,
args.weight_decay,
args.activation,
total_steps,
args.max_seq_len,
schedule=args.schedule,
warm_up_steps=args.warm_up_steps,
dropout=util.DEFAULT_DROPOUT,
**extra_args
)
else:
raise ValueError(f"Unknown model type {args.model_type}")
return model
def main(args):
util.seed_everything(37)
if args.dataset == "zinc" and args.train_tokens is not None:
raise ValueError("train_tokens arg must be None when using zinc dataset.")
if args.gpus > 1 and args.train_tokens is not None:
raise ValueError("train_tokens arg must be None when training on multiple gpus.")
print("Building tokeniser...")
tokeniser = util.load_tokeniser(args.vocab_path, args.chem_token_start_idx)
tokeniser.mask_prob = args.mask_prob
tokeniser.mask_scheme = args.mask_scheme
print("Finished tokeniser.")
print("Reading dataset...")
dataset = util.build_dataset(args)
print("Finished dataset.")
print("Building data module...")
dm = util.build_molecule_datamodule(args, dataset, tokeniser)
num_available_cpus = len(os.sched_getaffinity(0))
num_workers = num_available_cpus // args.gpus
dm._num_workers = num_workers
print(f"Using {str(num_workers)} workers for data module.")
print("Finished data module.")
vocab_size = len(tokeniser)
train_steps = util.calc_train_steps(args, dm)
print(f"Train steps: {train_steps}")
sampler = DecodeSampler(tokeniser, args.max_seq_len)
pad_token_idx = tokeniser.vocab[tokeniser.pad_token]
print("Building model...")
model = build_model(args, sampler, vocab_size, train_steps, pad_token_idx)
print("Finished model.")
print("Building trainer...")
trainer = util.build_trainer(args)
print("Finished trainer.")
print("Fitting data module to model")
trainer.fit(model, dm)
print("Finished training.")
print("Printing unknown tokens...")
tokeniser.print_unknown_tokens()
print("Complete.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Program level args
parser.add_argument("--dataset", type=str)
parser.add_argument("--data_path", type=str)
parser.add_argument("--model_type", type=str)
parser.add_argument("--vocab_path", type=str, default=util.DEFAULT_VOCAB_PATH)
parser.add_argument("--chem_token_start_idx", type=int, default=util.DEFAULT_CHEM_TOKEN_START)
parser.add_argument("--log_dir", type=str, default=util.DEFAULT_LOG_DIR)
parser.add_argument("--deepspeed_config_path", type=str, default=util.DEFAULT_DEEPSPEED_CONFIG_PATH)
# Model and training args
parser.add_argument("--batch_size", type=int, default=DEFAULT_BATCH_SIZE)
parser.add_argument("--acc_batches", type=int, default=DEFAULT_ACC_BATCHES)
parser.add_argument("--max_seq_len", type=int, default=util.DEFAULT_MAX_SEQ_LEN)
parser.add_argument("--mask_prob", type=float, default=DEFAULT_MASK_PROB)
parser.add_argument("--mask_scheme", type=str, default=DEFAULT_MASK_SCHEME)
parser.add_argument("--d_model", type=int, default=util.DEFAULT_D_MODEL)
parser.add_argument("--num_layers", type=int, default=util.DEFAULT_NUM_LAYERS)
parser.add_argument("--num_heads", type=int, default=util.DEFAULT_NUM_HEADS)
parser.add_argument("--d_feedforward", type=int, default=util.DEFAULT_D_FEEDFORWARD)
parser.add_argument("--lr", type=float, default=DEFAULT_LR)
parser.add_argument("--weight_decay", type=float, default=DEFAULT_WEIGHT_DECAY)
parser.add_argument("--epochs", type=int, default=DEFAULT_EPOCHS)
parser.add_argument("--activation", type=str, default=util.DEFAULT_ACTIVATION)
parser.add_argument("--clip_grad", type=float, default=DEFAULT_GRAD_CLIP)
parser.add_argument("--train_tokens", type=int, default=DEFAULT_TRAIN_TOKENS)
parser.add_argument("--num_buckets", type=int, default=DEFAULT_NUM_BUCKETS)
parser.add_argument("--limit_val_batches", type=float, default=DEFAULT_LIMIT_VAL_BATCHES)
parser.add_argument("--gpus", type=int, default=util.DEFAULT_GPUS)
parser.add_argument("--num_nodes", type=int, default=util.DEFAULT_NUM_NODES)
parser.add_argument("--task", type=str, default=DEFAULT_TASK)
parser.add_argument("--schedule", type=str, default=DEFAULT_SCHEDULE)
parser.add_argument("--warm_up_steps", type=int, default=DEFAULT_WARM_UP_STEPS)
parser.add_argument("--augment", dest="augment", action="store_true")
parser.add_argument("--no_augment", dest="augment", action="store_false")
parser.set_defaults(augment=DEFAULT_AUGMENT)
args = parser.parse_args()
main(args)
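# Example invocation (a sketch only; the script name, dataset name, and data
# path are assumptions and depend on how util.build_dataset() resolves them):
#   python pretrain.py --dataset zinc --data_path data/zinc \
#       --model_type bart --batch_size 128 --epochs 10 --gpus 1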
|
479649
|
import os
import time
import client.api
import client.models
from mamba import description, before, after, it
from expects import *
from expects.matchers import Matcher
from common import Config, Service
from common.helper import (make_dynamic_results_config,
check_modules_exists,
get_memory_dynamic_results_fields,
memory_generator_model,
wait_for_buffer_initialization_done)
from common.matcher import (has_location,
has_json_content_type,
raise_api_exception,
be_valid_memory_info,
be_valid_memory_generator,
be_valid_memory_generator_result,
be_valid_dynamic_results)
CONFIG = Config(os.path.join(os.path.dirname(__file__),
os.environ.get('MAMBA_CONFIG', 'config.yaml')))
with description('Memory Generator Module', 'memory') as self:
with before.all:
service = Service(CONFIG.service())
self._process = service.start()
self._api = client.api.MemoryGeneratorApi(service.client())
if not check_modules_exists(service.client(), 'memory'):
self.skip()
with after.all:
try:
                for gen in self._api.list_memory_generators():
                    if gen.running:
                        self._api.stop_memory_generator(gen.id)
                    self._api.delete_memory_generator(gen.id)
except AttributeError:
pass
try:
self._process.terminate()
self._process.wait()
except AttributeError:
pass
with description('Information'):
with description('/memory-info'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-info', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "GET"}))
with context('GET'):
with before.all:
self._result = self._api.memory_info_with_http_info(
_return_http_data_only=False)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('valid memory info'):
expect(self._result[0]).to(be_valid_memory_info)
with description('Memory Generators'):
with description('/memory-generators'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-generators', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "GET, POST"}))
with context('POST'):
with shared_context('create generator'):
with before.all:
self._result = self._api.create_memory_generator_with_http_info(
self._model, _return_http_data_only=False)
with after.all:
self._api.delete_memory_generator(self._result[0].id)
with it('created (201)'):
expect(self._result[1]).to(equal(201))
with it('has valid Location header'):
expect(self._result[2]).to(has_location('/memory-generators/' + self._result[0].id))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid generator'):
expect(self._result[0]).to(be_valid_memory_generator)
with it('has same config'):
if (not self._model.id):
self._model.id = self._result[0].id
self._model.init_percent_complete = self._result[0].init_percent_complete
expect(self._result[0]).to(equal(self._model))
with description('with empty ID'):
with before.all:
self._model = memory_generator_model(self._api.api_client)
with included_context('create generator'):
with it('random ID assigned'):
expect(self._result[0].id).not_to(be_empty)
with description('with specified ID'):
with before.all:
self._model = memory_generator_model(
self._api.api_client, id='some-specified-id')
with included_context('create generator'):
pass
with context('GET'):
with before.all:
model = memory_generator_model(self._api.api_client)
self._g8s = [self._api.create_memory_generator(model)
for a in range(3)]
self._result = self._api.list_memory_generators_with_http_info(
_return_http_data_only=False)
with after.all:
for g7r in self._g8s:
self._api.delete_memory_generator(g7r.id)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('return list'):
expect(self._result[0]).not_to(be_empty)
expect(len(self._result[0])).to(equal(len(self._g8s)))
for gen in self._result[0]:
expect(gen).to(be_valid_memory_generator)
with description('/memory-generators/{id}'):
with before.all:
model = memory_generator_model(self._api.api_client)
self._g7r = self._api.create_memory_generator(model)
expect(self._g7r).to(be_valid_memory_generator)
with after.all:
self._api.delete_memory_generator(self._g7r.id)
with context('GET'):
with description('by existing ID'):
with before.all:
self._result = self._api.get_memory_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('generator object'):
expect(self._result[0]).to(be_valid_memory_generator)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.get_memory_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.get_memory_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with context('DELETE'):
with description('by existing ID'):
with shared_context('delete generator'):
with it('deleted (204)'):
result = self._api.delete_memory_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('not found (404)'):
expr = lambda: self._api.get_memory_generator(self._g7r.id)
expect(expr).to(raise_api_exception(404))
with description('not running generator'):
with before.all:
model = memory_generator_model(
self._api.api_client, running = False)
self._g7r = self._api.create_memory_generator(model)
expect(self._g7r).to(be_valid_memory_generator)
with it('not running'):
result = self._api.get_memory_generator(self._g7r.id)
expect(result.running).to(be_false)
with included_context('delete generator'):
pass
with description('running generator'):
with before.all:
model = memory_generator_model(
self._api.api_client, running = True)
self._g7r = self._api.create_memory_generator(model)
expect(self._g7r).to(be_valid_memory_generator)
with it('running'):
result = self._api.get_memory_generator(self._g7r.id)
expect(result.running).to(be_true)
with included_context('delete generator'):
pass
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.delete_memory_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.delete_memory_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with description('/memory-generators/{id}/start'):
with before.all:
model = memory_generator_model(self._api.api_client)
self._g7r = self._api.create_memory_generator(model)
expect(self._g7r).to(be_valid_memory_generator)
expect(wait_for_buffer_initialization_done(self._api, self._g7r.id, 10)).to(be_true)
with after.all:
self._api.delete_memory_generator(self._g7r.id)
with context('POST'):
with description('by existing ID'):
with before.all:
self._result = self._api.start_memory_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
with shared_context('start generator'):
with it('is not running'):
expect(self._g7r.running).to(be_false)
with it('started (201)'):
expect(self._result[1]).to(equal(201))
with it('has valid Location header'):
expect(self._result[2]).to(has_location('/memory-generator-results/' + self._result[0].id))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid result'):
expect(self._result[0]).to(be_valid_memory_generator_result)
expect(self._result[0].active).to(be_true)
expect(self._result[0].generator_id).to(equal(self._g7r.id))
with it('is running'):
g7r = self._api.get_memory_generator(self._g7r.id)
expect(g7r).to(be_valid_memory_generator)
expect(g7r.running).to(be_true)
with included_context('start generator'):
pass
with description('already running generator'):
with it('bad request (400)'):
expr = lambda: self._api.start_memory_generator(self._g7r.id)
expect(expr).to(raise_api_exception(400))
with description('with Dynamic Results'):
with before.all:
self._api.stop_memory_generator(self._g7r.id)
dynamic = make_dynamic_results_config(
get_memory_dynamic_results_fields())
self._result = self._api.start_memory_generator_with_http_info(
self._g7r.id, dynamic_results=dynamic, _return_http_data_only=False)
with included_context('start generator'):
with it('has valid dynamic results'):
expect(self._result[0].dynamic_results).to(be_valid_dynamic_results)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.start_memory_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.start_memory_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with description('/memory-generators/{id}/stop'):
with before.all:
model = memory_generator_model(
self._api.api_client, running=True)
self._g7r = self._api.create_memory_generator(model)
expect(self._g7r).to(be_valid_memory_generator)
expect(wait_for_buffer_initialization_done(self._api, self._g7r.id, 10)).to(be_true)
with after.all:
self._api.delete_memory_generator(self._g7r.id)
with context('POST'):
with description('by existing ID'):
with it('is running'):
expect(self._g7r.running).to(be_true)
with it('stopped (204)'):
result = self._api.stop_memory_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('is not running'):
g7r = self._api.get_memory_generator(self._g7r.id)
expect(g7r).to(be_valid_memory_generator)
expect(g7r.running).to(be_false)
with description('already stopped generator'):
with it('bad request (400)'):
expr = lambda: self._api.stop_memory_generator(self._g7r.id)
expect(expr).to(raise_api_exception(400))
with description('by non-existent ID'):
with it('not found (404)'):
                        expr = lambda: self._api.stop_memory_generator('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
                        expr = lambda: self._api.stop_memory_generator('bad_id')
expect(expr).to(raise_api_exception(400))
with description('Memory Generators bulk operations'):
with description('/memory-generators/x/bulk-create'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-generators/x/bulk-create', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "POST"}))
with description('POST'):
with before.all:
self._models = [
memory_generator_model(self._api.api_client),
memory_generator_model(self._api.api_client)
]
request = client.models.BulkCreateMemoryGeneratorsRequest(self._models)
self._result = self._api.bulk_create_memory_generators_with_http_info(
request, _return_http_data_only=False)
with after.all:
for g7r in self._result[0]:
self._api.delete_memory_generator(g7r.id)
with it('created (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid generator list'):
expect(self._result[0]).not_to(be_empty)
expect(len(self._result[0])).to(equal(len(self._models)))
for g7r in self._result[0]:
expect(g7r).to(be_valid_memory_generator)
with it('has same config'):
for idx in range(len(self._models)):
model = self._models[idx]
model.init_percent_complete = self._result[0][idx].init_percent_complete
if (not model.id):
model.id = self._result[0][idx].id
expect(self._result[0][idx]).to(equal(model))
with description('/memory-generators/x/bulk-delete'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-generators/x/bulk-delete', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "POST"}))
with context('POST'):
with before.all:
self._ids = []
self._model = memory_generator_model(
self._api.api_client, running=False)
with shared_context('delete generators'):
with before.all:
self._g8s = [
self._api.create_memory_generator(self._model)
for i in range(3)]
with it('all exist'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result).to(be_valid_memory_generator)
with it('no content (204)'):
                        request = client.models.BulkDeleteMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s] + self._ids)
result = self._api.bulk_delete_memory_generators_with_http_info(
request, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('all deleted'):
for g7r in self._g8s:
result = lambda: self._api.get_memory_generator(g7r.id)
expect(result).to(raise_api_exception(404))
with description('with existing IDs'):
with included_context('delete generators'):
pass
with description('with non-existent ID'):
with before.all:
self._ids = ['unknown']
with included_context('delete generators'):
pass
with description('with invalid ID'):
with before.all:
self._ids = ['bad_id']
with included_context('delete generators'):
pass
with description('/memory-generators/x/bulk-start'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-generators/x/bulk-start', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "POST"}))
with description('POST'):
with before.all:
model = memory_generator_model(self._api.api_client)
self._g8s = [
self._api.create_memory_generator(model)
for a in range(3)]
for a in range(3):
expect(wait_for_buffer_initialization_done(self._api, self._g8s[a].id, 10)).to(be_true)
with after.all:
request = client.models.BulkDeleteMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s])
self._api.bulk_delete_memory_generators(request)
with description('by existing IDs'):
with before.all:
request = client.models.BulkStartMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s])
self._result = self._api.bulk_start_memory_generators_with_http_info(
request, _return_http_data_only=False)
with it('is not running'):
for g7r in self._g8s:
expect(g7r.running).to(be_false)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('returned valid results'):
for result in self._result[0]:
expect(result).to(be_valid_memory_generator_result)
expect(result.active).to(be_true)
with it('all started'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result).to(be_valid_memory_generator)
expect(result.running).to(be_true)
with description('already running generators'):
with it('bad request (400)'):
request = client.models.BulkStartMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s])
expr = lambda: self._api.bulk_start_memory_generators(request)
expect(expr).to(raise_api_exception(400))
with it('state was not changed'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result).to(be_valid_memory_generator)
expect(result.running).to(be_true)
with description('with non-existant ID'):
with before.all:
for num, g7r in enumerate(self._g8s, start=1):
try:
if (num % 2) == 0:
g7r.running = False
self._api.stop_memory_generator(g7r.id)
else:
g7r.running = True
self._api.start_memory_generator(g7r.id)
except Exception:
pass
self._results_count = len(self._api.list_memory_generator_results())
with it('not found (404)'):
request = client.models.BulkStartMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s] + ['unknown'])
expr = lambda: self._api.bulk_start_memory_generators(request)
expect(expr).to(raise_api_exception(404))
with it('state was not changed'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result).to(be_valid_memory_generator)
expect(result.running).to(equal(g7r.running))
with it('new results was not created'):
results = self._api.list_memory_generator_results()
expect(len(results)).to(equal(self._results_count))
with description('with invalid ID'):
with before.all:
self._results_count = len(self._api.list_memory_generator_results())
with it('bad request (400)'):
request = client.models.BulkStartMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s] + ['bad_id'])
expr = lambda: self._api.bulk_start_memory_generators(request)
expect(expr).to(raise_api_exception(400))
with it('state was not changed'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result).to(be_valid_memory_generator)
expect(result.running).to(equal(g7r.running))
with it('new results was not created'):
results = self._api.list_memory_generator_results()
expect(len(results)).to(equal(self._results_count))
with description('/memory-generators/x/bulk-stop'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-generators/x/bulk-stop', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "POST"}))
with description('POST'):
with before.all:
self._ids = []
model = memory_generator_model(self._api.api_client)
self._g8s = [
self._api.create_memory_generator(model)
for a in range(3)]
for a in range(3):
expect(wait_for_buffer_initialization_done(self._api, self._g8s[a].id, 10)).to(be_true)
with after.all:
request = client.models.BulkDeleteMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s])
self._api.bulk_delete_memory_generators(request)
with shared_context('stop generators'):
with before.all:
for g7r in self._g8s:
self._api.start_memory_generator(g7r.id)
with it('all running'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result.running).to(be_true)
with it('no content (204)'):
request = client.models.BulkStopMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s] + self._ids)
result = self._api.bulk_stop_memory_generators_with_http_info(
request, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('all stopped'):
for g7r in self._g8s:
result = self._api.get_memory_generator(g7r.id)
expect(result).to(be_valid_memory_generator)
expect(result.running).to(be_false)
with description('with existing IDs'):
with included_context('stop generators'):
pass
with description('already stopped generators'):
with it('no content (204)'):
request = client.models.BulkStopMemoryGeneratorsRequest(
[g7r.id for g7r in self._g8s])
result = self._api.bulk_stop_memory_generators_with_http_info(
request, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with description('with non-existent ID'):
with before.all:
self._ids = ['unknown']
with included_context('stop generators'):
pass
with description('with invalid ID'):
with before.all:
self._ids = ['bad_id']
with included_context('stop generators'):
pass
with description('Memory Generator Results'):
with before.all:
model = memory_generator_model(self._api.api_client)
self._g7r = self._api.create_memory_generator(model)
expect(self._g7r).to(be_valid_memory_generator)
expect(wait_for_buffer_initialization_done(self._api, self._g7r.id, 10)).to(be_true)
            self._runs = 3
for i in range(self._runs):
self._api.start_memory_generator(self._g7r.id)
self._api.stop_memory_generator(self._g7r.id)
with description('/memory-generator-results'):
with context('PUT'):
with it('not allowed (405)'):
expect(lambda: self._api.api_client.call_api('/memory-generator-results', 'PUT')).to(
raise_api_exception(405, headers={'Allow': "GET"}))
with context('GET'):
with before.all:
self._result = self._api.list_memory_generator_results_with_http_info(
_return_http_data_only=False)
with it('success (200)'):
expect(self._result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._result[2]).to(has_json_content_type)
with it('results list'):
expect(self._result[0]).not_to(be_empty)
expect(len(self._result[0])).to(be(self._runs))
for result in self._result[0]:
expect(result).to(be_valid_memory_generator_result)
with description('/memory-generator-results/{id}'):
with before.all:
rlist = self._api.list_memory_generator_results()
expect(rlist).not_to(be_empty)
self._result = rlist[0]
with context('GET'):
with description('by existing ID'):
with before.all:
self._get_result = self._api.get_memory_generator_result_with_http_info(
self._result.id, _return_http_data_only=False)
with it('success (200)'):
expect(self._get_result[1]).to(equal(200))
with it('has Content-Type: application/json header'):
expect(self._get_result[2]).to(has_json_content_type)
with it('valid result'):
expect(self._get_result[0]).to(be_valid_memory_generator_result)
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.get_memory_generator_result('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.get_memory_generator_result('bad_id')
expect(expr).to(raise_api_exception(400))
with context('DELETE'):
with description('by existing ID'):
with description('active result'):
with before.all:
self._result = self._api.start_memory_generator(self._g7r.id)
with after.all:
self._api.stop_memory_generator(self._g7r.id)
with it('exists'):
expect(self._result).to(be_valid_memory_generator_result)
with it('is active'):
expect(self._result.active).to(be_true)
with it('bad request (400)'):
result = lambda: self._api.delete_memory_generator_result(self._result.id)
expect(result).to(raise_api_exception(400))
with it('not deleted'):
result = self._api.get_memory_generator_result(self._result.id)
expect(result).to(be_valid_memory_generator_result)
with description('inactive result'):
with before.all:
result = self._api.start_memory_generator(self._g7r.id)
self._api.stop_memory_generator(self._g7r.id)
self._result = self._api.get_memory_generator_result(result.id)
with it('exists'):
expect(self._result).to(be_valid_memory_generator_result)
with it('is not active'):
expect(self._result.active).to(be_false)
with it('deleted (204)'):
result = self._api.delete_memory_generator_result_with_http_info(
self._result.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('not found (404)'):
expr = lambda: self._api.get_memory_generator_result(self._result.id)
expect(expr).to(raise_api_exception(404))
with description('by non-existent ID'):
with it('not found (404)'):
expr = lambda: self._api.delete_memory_generator_result('unknown')
expect(expr).to(raise_api_exception(404))
with description('by invalid ID'):
with it('bad request (400)'):
expr = lambda: self._api.delete_memory_generator_result('bad_id')
expect(expr).to(raise_api_exception(400))
with description('delete results with generator'):
with it('results exists'):
results = self._api.list_memory_generator_results()
expect(results).not_to(be_empty)
with it('generator deleted'):
result = self._api.delete_memory_generator_with_http_info(
self._g7r.id, _return_http_data_only=False)
expect(result[1]).to(equal(204))
with it('results deleted'):
results = self._api.list_memory_generator_results()
expect(results).to(be_empty)
|
479650
|
import math
import textwrap
# Grid/"Encryption"-style transposition: strip spaces, lay the characters out in
# a rows x col grid (rows <= col, rows * col >= length), then print column-wise.
str1 = ''.join(input().strip().split())
rows = math.floor(math.sqrt(len(str1)))
col = rows
if rows * col < len(str1):
    col += 1
if rows * col < len(str1):
    rows += 1
l1 = textwrap.wrap(str1, col)  # break the string into rows of length `col`
for i in range(0, col):
    for j in range(0, rows):
        if (i + 1) + j * col > len(str1):  # past the end of the string
            continue
        print(l1[j][i], end='')
    print(' ', end='')
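# Worked example (assuming a classic "Encryption"-style input): for the line
# "have a nice day" (length 12 -> rows=3, col=4) the wrapped rows are "have",
# "anic", "eday", and reading down the columns prints "hae and via ecy ".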
|
479684
|
from numpy import atleast_2d, ndarray, float32, float64, float128
class PythonDSP(object):
"""A FAUST DSP wrapper.
This class is more low-level than the FAUST class. It can be viewed as an
abstraction that sits directly on top of the FAUST DSP struct.
"""
def __init__(self, C, ffi, fs):
"""Initialise a PythonDSP object.
To instantiate this object, you create a cffi.FFI object that contains
all required declarations (check the FAUSTPy.FAUST code for an
        example). Then you compile the code via ffi.verify(), which creates an
FFILibrary object. Both of these are then passed to this constructor
along with the other parameters specified below.
Parameters:
-----------
C : cffi.FFILibrary
The FFILibrary that represents the compiled code.
ffi : cffi.FFI
The CFFI instance that holds all the data type declarations.
fs : int
The sampling rate the FAUST DSP should be initialised with.
"""
self.__C = C
self.__ffi = ffi
self.__faust_float = ffi.getctype("FAUSTFLOAT")
self.__dsp = ffi.gc(C.newmydsp(), C.deletemydsp)
self.metadata = {}
if fs <= 0:
raise ValueError("The sampling rate must have a positive value.")
if self.__faust_float == "float":
self.__dtype = float32
elif self.__faust_float == "double":
self.__dtype = float64
elif self.__faust_float == "long double":
self.__dtype = float128
# calls both classInitmydsp() and instanceInitmydsp()
C.initmydsp(self.__dsp, int(fs))
# allocate the input and output pointers so that they are not
# allocated/deallocated at every call to compute()
# TODO: can the number of inputs/outputs change at run time?
self.__input_p = self.__ffi.new("FAUSTFLOAT*[]", self.num_in)
self.__output_p = self.__ffi.new("FAUSTFLOAT*[]", self.num_out)
dsp = property(fget=lambda x: x.__dsp,
doc="The DSP struct that calls back to its parent object.")
dtype = property(fget=lambda x: x.__dtype,
doc="A dtype corresponding to the value of FAUSTFLOAT.")
faustfloat = property(fget=lambda x: x.__faust_float,
doc="The value of FAUSTFLOAT for this DSP.")
fs = property(fget=lambda s: s.__C.getSampleRatemydsp(s.__dsp),
doc="The sampling rate of the DSP.")
num_in = property(fget=lambda s: s.__C.getNumInputsmydsp(s.__dsp),
doc="The number of input channels.")
num_out = property(fget=lambda s: s.__C.getNumOutputsmydsp(s.__dsp),
doc="The number of output channels.")
def compute(self, audio):
"""
Process an ndarray with the FAUST DSP.
Parameters:
-----------
The first argument depends on the type of DSP (synthesizer or effect):
audio : numpy.ndarray
If the DSP is an effect (i.e., it processes input data and produces
output), the first argument is an audio signal to process.
or
count : int
If the DSP is a synthesizer (i.e., it has zero inputs and produces
output), the first argument is the number of output samples to
produce
Returns:
--------
out : numpy.ndarray
The output of the DSP.
Notes:
------
This function uses the buffer protocol to avoid copying the input data.
"""
if self.num_in > 0:
# returns a view, so very little overhead
audio = atleast_2d(audio)
# Verify that audio.dtype == self.dtype, because a) Python
# SEGFAULTs when audio.dtype < self.dtype and b) the computation is
# garbage when audio.dtype > self.dtype.
if audio.dtype != self.__dtype:
raise ValueError("audio.dtype must be {}".format(self.__dtype))
count = audio.shape[1] # number of samples
num_in = self.num_in # number of input channels
# set up the input pointers
for i in range(num_in):
self.__input_p[i] = self.__ffi.cast('FAUSTFLOAT *',
audio[i].ctypes.data)
else:
# special case for synthesizers: the input argument is the number
# of samples
count = audio
num_out = self.num_out # number of output channels
# initialise the output array
output = ndarray((num_out, count), dtype=self.__dtype)
# set up the output pointers
for i in range(num_out):
self.__output_p[i] = self.__ffi.cast('FAUSTFLOAT *',
output[i].ctypes.data)
# call the DSP
self.__C.computemydsp(self.__dsp, count, self.__input_p,
self.__output_p)
return output
# TODO: Run some more serious tests to check whether compute2() is worth
# keeping, because with the bundled DSP the run-time is about 83 us for
# 2x64 samples versus about 90 us for compute(), so only about 7 us
# difference.
def compute2(self, audio):
"""
Process an ndarray with the FAUST DSP, like compute(), but without any
safety checks. NOTE: compute2() can crash Python if "audio" is an
incompatible NumPy array!
This function is only useful if the DSP is an effect since the checks
not made here do not apply to synthesizers.
Parameters:
-----------
audio : numpy.ndarray
The audio signal to process.
Returns:
--------
out : numpy.ndarray
The output of the DSP.
Notes:
------
This function uses the buffer protocol to avoid copying the input data.
"""
count = audio.shape[1] # number of samples
num_in = self.num_in # number of input channels
num_out = self.num_out # number of output channels
# initialise the output array
output = ndarray((num_out, count), dtype=audio.dtype)
# set up the output pointers
for i in range(num_out):
self.__output_p[i] = self.__ffi.cast('FAUSTFLOAT *',
output[i].ctypes.data)
# set up the input pointers
for i in range(num_in):
self.__input_p[i] = self.__ffi.cast('FAUSTFLOAT *',
audio[i].ctypes.data)
# call the DSP
self.__C.computemydsp(self.__dsp, count, self.__input_p,
self.__output_p)
return output
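# Minimal usage sketch (assumes `C` and `ffi` were produced with cffi as
# described in the __init__ docstring; the sampling rate and block size below
# are arbitrary examples):
#
#   import numpy as np
#   dsp = PythonDSP(C, ffi, fs=48000)
#   if dsp.num_in > 0:                          # effect: pass audio of dsp.dtype
#       block = np.zeros((dsp.num_in, 64), dtype=dsp.dtype)
#   else:                                       # synthesizer: pass a sample count
#       block = 64
#   out = dsp.compute(block)                    # out.shape == (dsp.num_out, 64)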
|
479703
|
import re
from discord import MessageType
from utils.settings import settings
from utils.globals import gc, get_color
import utils
async def calc_mutations(msg):
try: # if the message is a file, extract the discord url from it
json = str(msg.attachments[0]).split("'")
for string in json:
if string is not None and string != "":
if "cdn.discordapp.com/attachments" in string:
msg.content = string
break
except IndexError: pass
# otherwise it must not have any attachments and its a regular message
text = msg.content
# check for in-line code blocks
if text.count("```") > 1:
while("```") in text:
text = await convert_code_block(text)
msg.content = text
    # TODO: if there are asterisks or __'s in the code, then
# this will not stop them from being formatted
# check for in-line code marks
if text.count("`") > 1:
while("`") in text:
text = await convert_code(text)
msg.content = text
# check to see if it has any custom-emojis
# These will look like <:emojiname:39432432903201>
# We will recursively trim this into just :emojiname:
if msg.server.emojis is not None and len(msg.server.emojis) > 0:
for emoji in msg.server.emojis:
full_name = "<:" + emoji.name + ":" + emoji.id + ">"
while full_name in text:
text = await trim_emoji(full_name, emoji.name, text)
msg.content = text
# check for boldened font
if text.count("**") > 1:
while("**") in text:
text = await convert_bold(text)
msg.content = text
# check for italic font
if text.count("*") > 1:
while("*") in text:
text = await convert_italic(text)
msg.content = text
# check for underlined font
if text.count("__") > 1:
while("__") in text:
text = await convert_underline(text)
msg.content = text
# check for urls
if "http://" in text or "https://" in text or "www." in text \
or "ftp://" in text or ".com" in text:
msg.content = await convert_url(text)
# check if the message is a "user has pinned..." message
if msg.type == MessageType.pins_add:
msg.content = await convert_pin(msg)
# else it must be a regular message, nothing else
return msg
async def convert_pin(msg):
name = ""
if msg.author.nick is not None and msg.author.nick != "":
name = msg.author.nick
else: name = msg.author.name
return "📌 " + str(name) + " has pinned a message to this channel."
async def trim_emoji(full_name, short_name, string):
return string.replace(full_name, ":" + short_name + ":")
async def convert_bold(string):
sections = string.split("**")
left = sections[0]
target = sections[1]
right = "".join(sections[2])
return gc.term.normal + gc.term.white + left + " " + gc.term.bold(target) + gc.term.normal + \
gc.term.white + " " + right
async def convert_italic(string):
sections = string.split("*")
left = sections[0]
target = sections[1]
right = "".join(sections[2])
return gc.term.normal + gc.term.white + left + " " + gc.term.italic(target) + gc.term.normal + \
gc.term.white + " " + right
async def convert_underline(string):
sections = string.split("__")
left = sections[0]
target = sections[1]
right = "".join(sections[2])
return gc.term.normal + gc.term.white + left + " " + gc.term.underline(target) + gc.term.normal + \
gc.term.white + " " + right
async def convert_code(string):
sections = string.split("`")
left = sections[0]
target = sections[1]
right = "".join(sections[2])
return gc.term.normal + gc.term.white + left + " " + await get_color(settings["code_block_color"]) \
+ target + gc.term.normal \
+ gc.term.white + " " + right
async def convert_code_block(string):
sections = string.split("```")
left = sections[0]
target = sections[1]
right = "".join(sections[2])
return gc.term.normal + gc.term.white + left + " " + gc.term.on_black(target) + gc.term.normal + \
gc.term.white + " " + right
async def convert_url(string):
formatted_line = []
entities = []
if " " in string:
entities = string.split(" ")
else:
entities.append(string)
for entity in entities:
if "http://" in entity or "https://" in entity or "www." in entity \
or "ftp://" in entity or ".com" in entity:
entity = await get_color(settings["url_color"]) + gc.term.italic + gc.term.underline + entity + gc.term.normal
formatted_line.append(entity)
return " ".join(formatted_line)
|
479718
|
from functools import update_wrapper
from typing import Callable, Sequence, Tuple, Union
import jax.numpy as np
from jax import ops, random
# define a type alias for Jax Pytrees
Pytree = Union[tuple, list]
Bijector_Info = Tuple[str, tuple]
class ForwardFunction:
"""Return the output and log_det of the forward bijection on the inputs.
ForwardFunction of a Bijector, originally returned by the
InitFunction of the Bijector.
Parameters
----------
params : a Jax pytree
A pytree of bijector parameters.
This usually looks like a nested tuple or list of parameters.
inputs : np.ndarray
The data to be transformed by the bijection.
Returns
-------
outputs : np.ndarray
Result of the forward bijection applied to the inputs.
log_det : np.ndarray
The log determinant of the Jacobian evaluated at the inputs.
"""
def __init__(self, func: Callable):
self._func = func
def __call__(
self, params: Pytree, inputs: np.ndarray, **kwargs
) -> Tuple[np.ndarray, np.ndarray]:
return self._func(params, inputs, **kwargs)
class InverseFunction:
"""Return the output and log_det of the inverse bijection on the inputs.
InverseFunction of a Bijector, originally returned by the
InitFunction of the Bijector.
Parameters
----------
params : a Jax pytree
A pytree of bijector parameters.
This usually looks like a nested tuple or list of parameters.
inputs : np.ndarray
The data to be transformed by the bijection.
Returns
-------
outputs : np.ndarray
Result of the inverse bijection applied to the inputs.
log_det : np.ndarray
The log determinant of the Jacobian evaluated at the inputs.
"""
def __init__(self, func: Callable):
self._func = func
def __call__(
self, params: Pytree, inputs: np.ndarray, **kwargs
) -> Tuple[np.ndarray, np.ndarray]:
return self._func(params, inputs, **kwargs)
class InitFunction:
"""Initialize the corresponding Bijector.
InitFunction returned by the initialization of a Bijector.
Parameters
----------
rng : np.ndarray
A Random Number Key from jax.random.PRNGKey.
input_dim : int
The input dimension of the bijection.
Returns
-------
params : a Jax pytree
A pytree of bijector parameters.
This usually looks like a nested tuple or list of parameters.
forward_fun : ForwardFunction
The forward function of the Bijector.
inverse_fun : InverseFunction
The inverse function of the Bijector.
"""
def __init__(self, func: Callable):
self._func = func
def __call__(
self, rng: np.ndarray, input_dim: int, **kwargs
) -> Tuple[Pytree, ForwardFunction, InverseFunction]:
return self._func(rng, input_dim, **kwargs)
class Bijector:
"""Wrapper class for bijector functions"""
def __init__(self, func: Callable):
self._func = func
update_wrapper(self, func)
def __call__(self, *args, **kwargs) -> Tuple[InitFunction, Bijector_Info]:
return self._func(*args, **kwargs)
@Bijector
def Chain(
*inputs: Sequence[Tuple[InitFunction, Bijector_Info]]
) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that chains multiple InitFunctions into a single InitFunction.
Parameters
----------
inputs : (Bijector1(), Bijector2(), ...)
A container of Bijector calls to be chained together.
Returns
-------
InitFunction
The InitFunction of the total chained Bijector.
Bijector_Info
Tuple('Chain', Tuple(Bijector_Info for each bijection in the chain))
This allows the chain to be recreated later.
"""
init_funs = tuple(i[0] for i in inputs)
bijector_info = ("Chain", tuple(i[1] for i in inputs))
@InitFunction
def init_fun(rng, input_dim, **kwargs):
all_params, forward_funs, inverse_funs = [], [], []
for init_f in init_funs:
rng, layer_rng = random.split(rng)
param, forward_f, inverse_f = init_f(layer_rng, input_dim)
all_params.append(param)
forward_funs.append(forward_f)
inverse_funs.append(inverse_f)
def bijector_chain(params, bijectors, inputs, **kwargs):
log_dets = np.zeros(inputs.shape[0])
for bijector, param in zip(bijectors, params):
inputs, log_det = bijector(param, inputs, **kwargs)
log_dets += log_det
return inputs, log_dets
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
return bijector_chain(params, forward_funs, inputs, **kwargs)
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
return bijector_chain(params[::-1], inverse_funs[::-1], inputs, **kwargs)
return all_params, forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def ColorTransform(ref_idx: int, mag_idx: int) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that calculates photometric colors from magnitudes.
Using ColorTransform restricts and impacts the order of columns in the
corresponding normalizing flow. See the notes below for an example.
Parameters
----------
ref_idx : int
The index corresponding to the column of the reference band, which
serves as a proxy for overall luminosity.
mag_idx : arraylike of int
The indices of the magnitude columns from which colors will be calculated.
Returns
-------
InitFunction
The InitFunction of the ColorTransform Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
Notes
-----
ColorTransform requires careful management of column order in the bijector.
This is best explained with an example:
Assume we have data
[redshift, u, g, ellipticity, r, i, z, y, mass]
Then
ColorTransform(ref_idx=4, mag_idx=[1, 2, 4, 5, 6, 7])
will output
[redshift, ellipticity, mass, r, u-g, g-r, r-i, i-z, z-y]
Notice how the non-magnitude columns are aggregated at the front of the
array, maintaining their relative order from the original array.
These values are then followed by the reference magnitude, and the new colors.
    Also notice that the colors are computed from adjacent pairs of the
    magnitudes listed in mag_idx, in the order given. E.g.
    mag_idx=[1, 2, 5, 4, 6, 7] would have produced the colors
    [u-g, g-i, i-r, r-z, z-y]. You can chain multiple ColorTransforms
    back-to-back to create colors from non-adjacent magnitudes.
"""
# validate parameters
    if not isinstance(ref_idx, int):
        raise ValueError("ref_idx must be an integer.")
    if ref_idx <= 0:
        raise ValueError("ref_idx must be a positive integer.")
    if ref_idx not in mag_idx:
        raise ValueError("ref_idx must be in mag_idx.")
bijector_info = ("ColorTransform", (ref_idx, mag_idx))
# convert mag_idx to an array
mag_idx = np.array(mag_idx)
@InitFunction
def init_fun(rng, input_dim, **kwargs):
# array of all the indices
all_idx = np.arange(input_dim)
# indices for columns to stick at the front
front_idx = np.setdiff1d(all_idx, mag_idx)
# the index corresponding to the first magnitude
mag0_idx = len(front_idx)
# the new column order
new_idx = np.concatenate((front_idx, mag_idx))
# the new column for the reference magnitude
new_ref = np.where(new_idx == ref_idx)[0][0]
        # define a convenience function used in the inverse_fun below
        # if the first magnitude is the reference mag, do nothing
if ref_idx == mag_idx[0]:
def mag0(outputs):
return outputs
# if the first magnitude is not the reference mag,
# then we need to calculate the first magnitude (mag[0])
else:
def mag0(outputs):
return ops.index_update(
outputs,
ops.index[:, mag0_idx],
outputs[:, mag0_idx] + outputs[:, new_ref],
indices_are_sorted=True,
unique_indices=True,
)
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
# re-order columns and calculate colors
outputs = np.hstack(
(
inputs[:, front_idx], # other values
inputs[:, ref_idx, None], # ref mag
-np.diff(inputs[:, mag_idx]), # colors
)
)
            # log-determinant of the Jacobian is zero (volume-preserving map)
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
# convert all colors to be in terms of the first magnitude, mag[0]
outputs = np.hstack(
(
inputs[:, 0:mag0_idx], # other values unchanged
inputs[:, mag0_idx, None], # reference mag unchanged
np.cumsum(
inputs[:, mag0_idx + 1 :], axis=-1
), # all colors mag[i-1] - mag[i] --> mag[0] - mag[i]
)
)
# calculate mag[0]
outputs = mag0(outputs)
# mag[i] = mag[0] - (mag[0] - mag[i])
outputs = ops.index_update(
outputs,
ops.index[:, mag0_idx + 1 :],
outputs[:, mag0_idx, None] - outputs[:, mag0_idx + 1 :],
indices_are_sorted=True,
unique_indices=True,
)
# return to original ordering
outputs = outputs[:, np.argsort(new_idx)]
            # log-determinant of the Jacobian is zero (volume-preserving map)
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def InvSoftplus(
column_idx: int, sharpness: float = 1
) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that applies inverse softplus to the specified column(s).
Applying the inverse softplus ensures that samples from that column will
always be non-negative. This is because samples are the output of the
inverse bijection -- so samples will have a softplus applied to them.
Parameters
----------
column_idx : int
An index or iterable of indices corresponding to the column(s)
you wish to be transformed.
sharpness : float, default=1
The sharpness(es) of the softplus transformation. If more than one
is provided, the list of sharpnesses must be of the same length as
column_idx.
Returns
-------
InitFunction
        The InitFunction of the InvSoftplus Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
idx = np.atleast_1d(column_idx)
k = np.atleast_1d(sharpness)
if len(idx) != len(k) and len(k) != 1:
raise ValueError(
"Please provide either a single sharpness or one for each column index."
)
bijector_info = ("InvSoftplus", (column_idx, sharpness))
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = ops.index_update(
inputs,
ops.index[:, idx],
np.log(-1 + np.exp(k * inputs[:, idx])) / k,
)
log_det = np.log(1 + np.exp(-k * outputs[ops.index[:, idx]])).sum(axis=1)
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = ops.index_update(
inputs,
ops.index[:, idx],
np.log(1 + np.exp(k * inputs[:, idx])) / k,
)
log_det = -np.log(1 + np.exp(-k * inputs[ops.index[:, idx]])).sum(axis=1)
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def Reverse() -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that reverses the order of inputs.
Returns
-------
InitFunction
        The InitFunction of the Reverse Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
bijector_info = ("Reverse", ())
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = inputs[:, ::-1]
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = inputs[:, ::-1]
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def Roll(shift: int = 1) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that rolls inputs along their last column using np.roll.
Parameters
----------
shift : int, default=1
The number of places to roll.
Returns
-------
InitFunction
        The InitFunction of the Roll Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
if not isinstance(shift, int):
raise ValueError("shift must be an integer.")
bijector_info = ("Roll", (shift,))
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = np.roll(inputs, shift=shift, axis=-1)
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = np.roll(inputs, shift=-shift, axis=-1)
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def Scale(scale: float) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that multiplies inputs by a scalar.
Parameters
----------
scale : float
Factor by which to scale inputs.
Returns
-------
InitFunction
        The InitFunction of the Scale Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
if isinstance(scale, np.ndarray):
if scale.dtype != np.float32:
raise ValueError("scale must be a float or array of floats.")
elif not isinstance(scale, float):
raise ValueError("scale must be a float or array of floats.")
bijector_info = ("Scale", (scale,))
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = scale * inputs
log_det = np.log(scale ** inputs.shape[-1]) * np.ones(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = 1 / scale * inputs
log_det = -np.log(scale ** inputs.shape[-1]) * np.ones(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def ShiftBounds(
min: float, max: float, B: float = 5
) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector shifts the bounds of inputs so the lie in the range (-B, B).
Parameters
----------
min : float
The minimum of the input range.
    max : float
The maximum of the input range.
B : float, default=5
The extent of the output bounds, which will be (-B, B).
Returns
-------
InitFunction
The InitFunction of the ShiftBounds Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
min = np.atleast_1d(min)
max = np.atleast_1d(max)
if len(min) != len(max):
raise ValueError(
"Lengths of min and max do not match. "
+ "Please provide either a single min and max, "
+ "or a min and max for each dimension."
)
if (min > max).any():
raise ValueError("All mins must be less than maxes.")
bijector_info = ("ShiftBounds", (min, max, B))
mean = (max + min) / 2
half_range = (max - min) / 2
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = B * (inputs - mean) / half_range
log_det = np.log(np.prod(B / half_range)) * np.ones(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = inputs * half_range / B + mean
log_det = np.log(np.prod(half_range / B)) * np.ones(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def Shuffle() -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that randomly permutes inputs.
Returns
-------
InitFunction
The InitFunction of the Shuffle Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
bijector_info = ("Shuffle", ())
@InitFunction
def init_fun(rng, input_dim, **kwargs):
perm = random.permutation(rng, np.arange(input_dim))
inv_perm = np.argsort(perm)
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = inputs[:, perm]
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = inputs[:, inv_perm]
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def StandardScaler(
means: np.array, stds: np.array
) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that applies standard scaling to each input.
Each input dimension i has an associated mean u_i and standard dev s_i.
Each input is rescaled as (input[i] - u_i)/s_i, so that each input dimension
has mean zero and unit variance.
Parameters
----------
means : np.ndarray
The mean of each column.
stds : np.ndarray
The standard deviation of each column.
Returns
-------
InitFunction
The InitFunction of the StandardScaler Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
bijector_info = ("StandardScaler", (means, stds))
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
outputs = (inputs - means) / stds
log_det = np.log(1 / np.prod(stds)) * np.ones(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = inputs * stds + means
log_det = np.log(np.prod(stds)) * np.ones(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
@Bijector
def UniformDequantizer(column_idx: int = None) -> Tuple[InitFunction, Bijector_Info]:
"""Bijector that dequantizes discrete variables with uniform noise.
Dequantizers are necessary for modeling discrete values with a flow.
Note that this isn't technically a bijector.
Parameters
----------
column_idx : int
An index or iterable of indices corresponding to the column(s) with
discrete values.
Returns
-------
InitFunction
The InitFunction of the UniformDequantizer Bijector.
Bijector_Info
Tuple of the Bijector name and the input parameters.
This allows it to be recreated later.
"""
bijector_info = ("UniformDequantizer", (column_idx,))
if column_idx is None:
idx = ops.index[:, :]
else:
idx = ops.index[:, column_idx]
@InitFunction
def init_fun(rng, input_dim, **kwargs):
@ForwardFunction
def forward_fun(params, inputs, **kwargs):
u = random.uniform(random.PRNGKey(0), shape=inputs[idx].shape)
outputs = ops.index_update(
inputs.astype(float),
idx,
inputs[idx].astype(float) + u,
indices_are_sorted=True,
unique_indices=True,
)
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
@InverseFunction
def inverse_fun(params, inputs, **kwargs):
outputs = ops.index_update(
inputs,
idx,
np.floor(inputs[idx]),
indices_are_sorted=True,
unique_indices=True,
)
log_det = np.zeros(inputs.shape[0])
return outputs, log_det
return (), forward_fun, inverse_fun
return init_fun, bijector_info
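# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal example of
# composing the bijectors defined above, assuming `np` is jax.numpy and
# `random` is jax.random, as used throughout this file. The data values are
# made up purely for illustration. Guarded so importing the module is
# unaffected.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = random.PRNGKey(0)
    # chain a standard-scaling step with a column reversal
    init_fun, info = Chain(
        StandardScaler(np.array([0.5, 1.0]), np.array([2.0, 3.0])),
        Reverse(),
    )
    params, forward_fun, inverse_fun = init_fun(rng, input_dim=2)
    x = np.array([[0.5, 1.0], [2.5, 4.0]])        # toy batch of two 2-D samples
    y, fwd_log_det = forward_fun(params, x)       # forward pass through the chain
    x_back, inv_log_det = inverse_fun(params, y)  # inverse recovers the inputs
    print(y)
    print(x_back)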
|
479726
|
from data_mapper.properties import Property
from data_mapper.properties.compound_list import CompoundListProperty as L
from data_mapper.shortcuts import V, P
from data_mapper.tests.test_utils import PropertyTestCase
class CompoundListPropertyTests(PropertyTestCase):
def test__simple(self):
self.prop_test(L(V(1), V(2)), [1, 2])
def test__empty(self):
self.prop_test(L(), [])
def test__props_it(self):
self.prop_test(L(props_it=[V(1), V(2)]), [1, 2])
self.prop_test(L(props_it=[V(1), V([2, 3])]), [1, [2, 3]])
def test__star_props(self):
self.prop_test(L(V(0)), [0])
self.prop_test(L(V(0), V(1)), [0, 1])
self.prop_test(L(V(0), V(1), V(2)), [0, 1, 2])
self.prop_test(L(V(0), V(1), V(2)), [0, 1, 2])
def test__star_props__some_not_resolved(self):
self.prop_not_resolved(L(P(0)))
self.prop_not_resolved(L(P(0), V(1)))
self.prop_not_resolved(L(V(0), P(1)))
self.prop_not_resolved(L(V(0), V(1), P(2)))
self.prop_not_resolved(L(P(0), P(1), P(2)))
def test__star_props__lists(self):
self.prop_test(L([V(0), P(1)]), [0])
self.prop_test(L([P(0), V(1)]), [1])
self.prop_test(L([P(0), V(1)], [V(2)]), [1, 2])
self.prop_test(L([P(0), V(1)], [P(2), V(3)]), [1, 3])
def test__star_props__lists__some_entirely_not_resolved(self):
self.prop_not_resolved(L([P(0)]))
self.prop_not_resolved(L([P(0), P(1)]))
self.prop_not_resolved(L([P(0), V(1)], [P(2)]))
def test__get_value__set_in_parent(self):
prop = L(
Property('x'),
get_value=lambda *_: 'foo',
)
self.prop_test(prop, ['foo'], dict(x=5))
def test__get_value__set_in_grand_parent(self):
prop = L(
L(Property('x')),
get_value=lambda *_: 'foo',
)
self.prop_test(prop, [['foo']], dict(x=5))
|
479781
|
import pandas as pd
def collectPitch(df):
pitch_types = ['FT','FS','CH','FF','SL','CU','FC','SI','KC','EP','KN','FO']
pitch = df.sample(n=1)
pitch_label_df = pitch[pitch_types]
pitch_data = pitch.drop(pitch_types,axis=1).values.tolist()
return pitch_data, pitch_label_df.style.highlight_max(axis=1)
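# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): builds a one-row frame
# with the expected one-hot pitch-type columns plus two hypothetical feature
# columns ('release_speed', 'spin_rate') invented here for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pitch_types = ['FT','FS','CH','FF','SL','CU','FC','SI','KC','EP','KN','FO']
    row = {p: 0 for p in pitch_types}
    row.update({'FF': 1, 'release_speed': 95.2, 'spin_rate': 2300})
    df = pd.DataFrame([row])
    pitch_data, label_styler = collectPitch(df)
    print(pitch_data)  # feature values with the pitch-type label columns dropped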
|
479783
|
import logging
from flask import request
from flask_restplus import Resource
from biolink.ontology.ontology_manager import get_ontology
from biolink.datamodel.serializers import compact_association_set, association_results
from ontobio.golr.golr_associations import search_associations, GolrFields
from ontobio.ontol_factory import OntologyFactory
from ontobio.config import get_config
from ontobio.assoc_factory import AssociationSetFactory
from biolink.api.restplus import api
from biolink import USER_AGENT
log = logging.getLogger(__name__)
parser = api.parser()
parser.add_argument('subject', action='append', help='Entity ids to be examined, e.g. NCBIGene:9342, NCBIGene:7227, NCBIGene:8131, NCBIGene:157570, NCBIGene:51164, NCBIGene:6689, NCBIGene:6387')
parser.add_argument('background', action='append', help='Entity ids in background set, e.g. NCBIGene:84570, NCBIGene:3630; used in over-representation tests')
parser.add_argument('object_category', help='E.g. phenotype, function')
parser.add_argument('subject_category', default='gene', help='Default: gene. Other types may be used e.g. disease but statistics may not make sense')
parser.add_argument('max_p_value', default='0.05', help='Exclude results with p-value greater than this')
parser.add_argument('ontology', help='ontology id. Must be obo id. Examples: go, mp, hp, uberon (optional: will be inferred if left blank)')
parser.add_argument('taxon', help='must be NCBITaxon CURIE. Example: NCBITaxon:9606')
@api.doc(params={'object_category': 'CATEGORY of entity at link OBJECT (target), e.g. function, phenotype, disease'})
class OverRepresentation(Resource):
@api.expect(parser)
#<EMAIL>(association)
def get(self):
"""
Summary statistics for objects associated
"""
args = parser.parse_args()
M=GolrFields()
ont = None
ocat = args.get('object_category')
ontid = args.get('ontology')
if ontid is None:
if ocat == 'function':
ontid = 'go'
if ocat == 'phenotype':
# TODO: other phenotype ontologies
ontid = 'hp'
print("Loading: {}".format(ontid))
ont = get_ontology(ontid)
taxid = args.get('taxon')
max_p_value = float(args.max_p_value)
subjects = args.get('subject')
background = args.get('background')
afactory = AssociationSetFactory()
aset = afactory.create(ontology=ont, subject_category='gene', object_category=ocat, taxon=taxid)
enr = aset.enrichment_test(subjects=subjects, background=background, threshold=max_p_value, labels=True)
return {'results': enr }
|
479792
|
from setuptools import setup, find_packages
setup(
name = 'docformer',
packages = find_packages(where="src"),
package_dir = {"": "src", "docformer": "src/docformer"},
version = '0.1.0',
license='MIT',
description = 'DocFormer: End-to-End Transformer for Document Understanding',
author = '<NAME>, <NAME>',
author_email = '<EMAIL>, <EMAIL>',
url = 'https://github.com/shabie/docformer',
keywords = [
'artificial intelligence',
'attention mechanism',
'document understanding',
],
install_requires=[
'einops>=0.3',
'torch>=1.6',
'torchvision',
'transformers',
'pytesseract>=0.3.8',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
)
|
479793
|
import sys
sys.path.append('../')
import argparse
import os
import torch
from elmoformanylangs import Embedder
from pytorch_transformers import BertModel, BertTokenizer
from tqdm import tqdm
import numpy as np
from data_util import io_helper
"""
Creates static vectors for vocabulary
"""
def get_elmo(model, sentence_list, indices_list):
"""
Get elmo embeddings
:param model: elmo model
:param sentence_list: List of sentences for which embeddings will be created
    :param indices_list: List of list of indices. For every sentence: A list of indices for which word embeddings will be created
:return: List of list of word vectors. First list represents the sentences. Second list contains the word vectors in the same order as the indices were given in the indices_list.
"""
vector_list = []
# Give ELMo a list of sentences
embedding = model.sents2elmo(sentence_list)
for i in range(0, len(sentence_list)):
# Extract the words matching the indices for this sentence
indices = indices_list[i]
for ind in indices:
if ind == "":
continue
ind = int(ind)
e = embedding[i][ind]
vector_list += [e.tolist()]
return vector_list
def load_bert():
"""
Loads BERT model
:return: bert model
"""
pretrained_weights = 'bert-base-multilingual-cased'
model = BertModel.from_pretrained(pretrained_weights)
# Move BERT to cuda if available
if torch.cuda.is_available():
model.to("cuda")
return model
def get_bert(model, tokenizer, sentence_list, index_list):
"""
Get BERT embeddings
:param model: BERT model
:param tokenizer: BERT tokenizer
:param sentence_list: List of sentences for which embeddings will be created
    :param index_list: List of list of indices. For every sentence: A list of indices for which word embeddings will be created
:return: List of list of word vectors. First list represents the sentences. Second list contains the word vectors in the same order as the indices were given in the indices_list.
"""
vector_list = []
for i in range(0, len(sentence_list)):
sentence = sentence_list[i]
indices = index_list[i]
sentence_str = " ".join(sentence)
if torch.cuda.is_available():
input_ids_sentence = torch.tensor([tokenizer.encode(sentence_str, add_special_tokens=True)], device="cuda")
else:
input_ids_sentence = torch.tensor([tokenizer.encode(sentence_str, add_special_tokens=True)])
last_hidden_states = model(input_ids_sentence)[0] # Models outputs are now tuples
# Find relevant vectors
token_count_per_word = []
for j in range(0, len(sentence)):
# Save number of tokens per word
word = sentence[j]
tokens = tokenizer.encode(word, add_special_tokens=False)
token_count_per_word.append(len(tokens))
for j in range(0, len(indices)):
if indices[j] == "":
continue
# Find tokens for this word
index = int(indices[j])
offset = 1
for k in range(0, index):
offset += token_count_per_word[k]
last_token = offset + token_count_per_word[index]
vector_list += [np.mean(last_hidden_states[0][offset:last_token].tolist(),
axis=0)] # BERT uses word piece tokenization. Calculate the average over all tokens from this word
return vector_list
def load_elmo(lang):
"""
Loads elmo model
:param lang: language of the model
:return: elmo model
"""
emb = Embedder("../embeddings/elmo/" + lang)
return emb
def gen_embeds(vocab_dir, output_dir, lang):
"""
Generates all supported embeddings for given language
:param vocab_dir: Directory of the vocabulary files
:param output_dir: Directory of the embedding files
:param lang: language
"""
print("Generate " + lang + " embeddings")
# Read the input file to a list
sentence_file = os.path.join(vocab_dir, lang + ".txt")
with open(sentence_file) as f:
lines = f.read().splitlines()
f.close()
# Delete embeddings
clear_embeds(output_dir, "elmo", lang)
clear_embeds(output_dir, "bert", lang)
sentence_list = []
indices_list = []
for line in lines:
parts = line.split("\t")
tokens = parts[0].split(" ")
indices = parts[1].split(" ")
sentence_list.append(tokens)
indices_list.append(indices)
# Batch
batch = 10
s = int(len(sentence_list) / batch) + 1
# Load BERT
bert = load_bert()
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
for i in tqdm(range(0, s)):
# Check if this is the last batch and correct the last index
start = i * batch
end = (i + 1) * batch
if start >= len(sentence_list):
break
if end > len(sentence_list):
end = len(sentence_list)
# Sentences and indices for this batch
sl = sentence_list[start:end]
il = indices_list[start:end]
# Extract and save embeddings
store_embeds(output_dir, sl, il, get_elmo(load_elmo(lang), sl, il), lang, "elmo")
store_embeds(output_dir, sl, il, get_bert(bert, bert_tokenizer, sl, il), lang, "bert")
def clear_embeds(output_dir, embedding, lang):
"""
Deletes all embedding files for the given language and embedding
:param output_dir: directory of the embeddings
:param embedding: embedding
:param lang: language
"""
outfile = os.path.join(output_dir, embedding, lang + ".vec")
    if os.path.exists(outfile):
os.remove(outfile)
def store_embeds(output_dir, sentence_list, indices_list, vector_list, lang, embedding):
"""
Store all embeddings in a file
:param output_dir: Directory of the embedding files
:param sentence_list: list of list of tokens
:param indices_list: list of list of word indices.
:param vector_list: list of word vectors for the tokens
:param lang: language
:param embedding: embedding type
"""
# Create the output file
outfile = os.path.join(output_dir, embedding, lang + ".vec")
io_helper.ensure_dir_for_file(outfile)
with open(outfile, 'a') as fout:
j = 0
for i in range(0, len(sentence_list)):
# Write a separate line for every sentence index-pair
sentence = sentence_list[i]
sentence_str = " ".join(sentence)
for index in indices_list[i]:
if index == "":
continue
# Read vector and convert it to a string
vector = vector_list[j]
j += 1
vector_string = ""
z = 0
for number in vector:
if z != 0:
vector_string += " "
z += 1
vector_string += str(number)
# Write vector for sentence-index pair to the file
fout.write(sentence_str + "\t" + index + "\t" + vector_string + "\n")
fout.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# argument options
parser.add_argument('--vocab', type=str, required=False, default="../intrinsic/sentences/",
help='directory of the vocabulary')
parser.add_argument('--output', type=str, required=False, default="../intrinsic/static_context_embeddings/",
help='output directory of the embeddings')
parser.add_argument('--embeddings', type=str, required=False, default=None)
parser.add_argument('--langs', type=str, required=False, default=None)
args = parser.parse_args()
embeddings = None
if args.embeddings is not None:
embeddings = args.embeddings.split(",")
langs = ["german", "finnish", "russian", "turkish", "spanish"]
if args.langs is not None:
langs = args.langs.split(",")
for lang in langs:
print("Language " + lang)
gen_embeds(args.vocab, args.output, lang)
|
479868
|
from django.db import models
# Create your models here.
from kala.models import Kala
from taghaza.models import Taghaza
class Sefaresh(models.Model):
taghaza = models.ForeignKey(Taghaza, on_delete=models.DO_NOTHING , verbose_name="شماره تقاضا")
kala = models.ForeignKey(Kala, on_delete=models.DO_NOTHING, verbose_name="نام کالا")
meghdar = models.IntegerField(verbose_name="مقدار")
class Meta:
verbose_name = "سفارش"
verbose_name_plural = "سفارش ها"
def __str__(self):
return str(self.kala) + " به مقدار: " + str(self.meghdar) +" "+ self.kala.vahede_andazegiri
|
479889
|
from typing import KeysView, Generator
SERVICES_FOR_GROUP = {
"all": "tad_harvester tad_timelord_launcher tad_timelord tad_farmer tad_full_node tad_wallet".split(),
"node": "tad_full_node".split(),
"harvester": "tad_harvester".split(),
"farmer": "tad_harvester tad_farmer tad_full_node tad_wallet".split(),
"farmer-no-wallet": "tad_harvester tad_farmer tad_full_node".split(),
"farmer-only": "tad_farmer".split(),
"timelord": "tad_timelord_launcher tad_timelord tad_full_node".split(),
"timelord-only": "tad_timelord".split(),
"timelord-launcher-only": "tad_timelord_launcher".split(),
"wallet": "tad_wallet tad_full_node".split(),
"wallet-only": "tad_wallet".split(),
"introducer": "tad_introducer".split(),
"simulator": "tad_full_node_simulator".split(),
}
def all_groups() -> KeysView[str]:
return SERVICES_FOR_GROUP.keys()
def services_for_groups(groups) -> Generator[str, None, None]:
for group in groups:
for service in SERVICES_FOR_GROUP[group]:
yield service
def validate_service(service: str) -> bool:
return any(service in _ for _ in SERVICES_FOR_GROUP.values())
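# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): querying the group
# helpers from a script.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    print(sorted(all_groups()))
    print(list(services_for_groups(["farmer-only", "wallet-only"])))
    print(validate_service("tad_wallet"))      # True
    print(validate_service("not_a_service"))   # False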
|
479908
|
from flask import request, jsonify
from app import db, jwt
from app.auth import bp
from app.models import Users, RevokedTokenModel
from app.schemas import UsersDeserializingSchema
from app.errors.handlers import bad_request, error_response
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
get_jwt_identity,
jwt_required,
get_jwt,
)
from marshmallow import ValidationError
user_schema = UsersDeserializingSchema()
# Checks if the JWT is on the blacklisted token list
@jwt.token_in_blocklist_loader
def check_if_token_in_blacklist(jwt_header, jwt_data) -> bool:
"""
Helper function for checking if a token is present in the database revoked token table
Parameters
----------
jwt_header : dictionary
header data of the JWT
jwt_data : dictionary
payload data of the JWT
Returns
-------
bool
Returns True if the token is revoked, False otherwise
"""
jti = jwt_data["jti"]
return RevokedTokenModel.is_jti_blacklisted(jti)
@bp.post("/register")
def register() -> str:
"""
Endpoint for adding a new user to the database
Returns
-------
str
A JSON object containing the success message
"""
try:
result = user_schema.load(request.json)
except ValidationError as e:
return bad_request(e.messages)
if Users.query.filter_by(username=result["username"]).first():
return bad_request("Username already in use")
if Users.query.filter_by(email=result["email"]).first():
return bad_request("Email already in use")
user = Users(
username=result["username"],
first_name=result["first_name"],
last_name=result["last_name"],
email=result["email"],
birthday=result["birthday"],
)
user.set_password(result["password"])
db.session.add(user)
db.session.commit()
return jsonify({"msg": "Successfully registered"}), 201
@bp.post("/login")
def login() -> str:
"""
Endpoint for authorizing a user and retrieving a JWT
Returns
-------
str
A JSON object containing both the access JWT and the refresh JWT
"""
try:
result = user_schema.load(request.json)
except ValidationError as e:
return bad_request(e.messages)
user = Users.query.filter_by(username=result["username"]).first()
if user is None or not user.check_password(result["password"]):
return error_response(401, message="Invalid username or password")
tokens = {
"access_token": create_access_token(identity=user.id, fresh=True),
"refresh_token": create_refresh_token(identity=user.id),
}
return jsonify(tokens), 200
@bp.post("/refresh")
@jwt_required(refresh=True)
def refresh() -> str:
"""
Endpoint in order to retrieve a new access JWT using the refresh JWT.
A non-fresh access token is returned because the password is not involved in this transaction
Returns
-------
str
A JSON object containing the new access token
"""
user_id = get_jwt_identity()
new_token = create_access_token(identity=user_id, fresh=False)
payload = {"access_token": new_token}
return jsonify(payload), 200
@bp.post("/fresh-login")
def fresh_login() -> str:
"""
Endpoint for requesting a new fresh access token
Returns
-------
str
        A JSON object containing the new fresh access token
"""
try:
result = user_schema.load(request.json)
except ValidationError as e:
return bad_request(e.messages)
user = Users.query.filter_by(username=result["username"]).first()
if user is None or not user.check_password(result["password"]):
return error_response(401, message="Invalid username or password")
new_token = create_access_token(identity=user.id, fresh=True)
payload = {"access_token": new_token}
return jsonify(payload), 200
@bp.delete("/logout/token")
@jwt_required()
def logout_access_token() -> str:
"""
    Endpoint for revoking the current user's access token
Returns
-------
str
        A JSON object containing the success message
"""
jti = get_jwt()["jti"]
revoked_token = RevokedTokenModel(jti=jti)
revoked_token.add()
return jsonify({"msg": "Successfully logged out"}), 200
@bp.delete("/logout/fresh")
@jwt_required(refresh=True)
def logout_refresh_token() -> str:
"""
    Endpoint for revoking the current user's refresh token
Returns
-------
str
A JSON object containing a success message
"""
jti = get_jwt()["jti"]
revoked_token = RevokedTokenModel(jti=jti)
revoked_token.add()
return jsonify({"msg": "Successfully logged out"}), 200
|
479920
|
from django.conf.urls import url, include
from rest_framework.routers import DefaultRouter
from .views import ArticleViewSet, AuthorViewSet
router = DefaultRouter()
router.register(r'articles', ArticleViewSet)
router.register(r'authors', AuthorViewSet)
urlpatterns = [
url('', include(router.urls)),
]
|
479926
|
from __future__ import with_statement
import logging
from logging.config import fileConfig
from alembic import context
from sqlalchemy import create_engine
from falcon_web_demo.persistence import get_url
config = context.config
fileConfig(config.config_file_name)
kwargs = {'as_dictionary': True}
pref = 'loggingPreference'
logging_preference = context.get_x_argument(**kwargs).get(pref)
if logging_preference:
logging.getLogger('alembic').setLevel(logging_preference)
connectable = create_engine(get_url())
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=None
)
with context.begin_transaction():
context.run_migrations()
|
479934
|
import re
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
stopwords_en = set(stopwords.words('english'))
def normalize_text(string):
""" Text normalization from
https://github.com/yoonkim/CNN_sentence/blob/23e0e1f735570/process_data.py
as specified in Yao's paper.
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def tokenize(text):
return [t for t in normalize_text(text).split() if t not in stopwords_en]
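# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): normalization and
# stopword-filtered tokenization of a toy sentence.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample = "We haven't seen such results, have we?"
    print(normalize_text(sample))  # lower-cased, punctuation split off
    print(tokenize(sample))        # tokens with English stopwords removed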
|
479961
|
from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class SuccessRemoveParticipantsIqProtocolEntity(ResultIqProtocolEntity):
'''
<iq type="result" from="{{group_jid}}" id="{{id}}">
<remove type="success" participant="{{jid}}"></remove>
<remove type="success" participant="{{jid}}"></remove>
</iq>
'''
def __init__(self, _id, groupId, participantList):
super(SuccessRemoveParticipantsIqProtocolEntity, self).__init__(_from = groupId, _id = _id)
self.setProps(groupId, participantList)
def setProps(self, groupId, participantList):
self.groupId = groupId
self.participantList = participantList
self.action = 'remove'
def getAction(self):
return self.action
def toProtocolTreeNode(self):
node = super(SuccessRemoveParticipantsIqProtocolEntity, self).toProtocolTreeNode()
participantNodes = [
ProtocolTreeNode("remove", {
"type": "success",
"participant": participant
})
for participant in self.participantList
]
node.addChildren(participantNodes)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = super(SuccessRemoveParticipantsIqProtocolEntity, SuccessRemoveParticipantsIqProtocolEntity).fromProtocolTreeNode(node)
entity.__class__ = SuccessRemoveParticipantsIqProtocolEntity
participantList = []
for participantNode in node.getAllChildren():
if participantNode["type"]=="success":
participantList.append(participantNode["participant"])
entity.setProps(node.getAttributeValue("from"), participantList)
return entity
|
479964
|
from __future__ import print_function
import sys
import platform
import textwrap
from subprocess import check_output
from setuptools import setup, find_packages
def check_install():
"""
Try to detect the two most common installation errors:
1. Installing on macOS using a Homebrew version of Python
2. Installing on Linux using Python 2 when GDB is linked with Python 3
"""
if platform.system() == 'Darwin' and sys.executable != '/usr/bin/python':
print("*" * 79)
print(textwrap.fill(
"WARNING: You are not using the version of Python included with "
"macOS. If you intend to use Voltron with the LLDB included "
"with Xcode, or GDB installed with Homebrew, it will not work "
"unless it is installed using the system's default Python. If "
"you intend to use Voltron with a debugger installed by some "
"other method, it may be safe to ignore this warning. See the "
"following documentation for more detailed installation "
"instructions: "
"https://github.com/snare/voltron/wiki/Installation", 79))
print("*" * 79)
elif platform.system() == 'Linux':
try:
output = check_output([
"gdb", "-batch", "-q", "--nx", "-ex",
"pi print(sys.version_info.major)"
]).decode("utf-8")
gdb_python = int(output)
if gdb_python != sys.version_info.major:
print("*" * 79)
print(textwrap.fill(
"WARNING: You are installing Voltron using Python {0}.x "
"and GDB is linked with Python {1}.x. GDB will not be "
"able to load Voltron. Please install using Python {1} "
"if you intend to use Voltron with the copy of GDB that "
"is installed. See the following documentation for more "
"detailed installation instructions: "
"https://github.com/snare/voltron/wiki/Installation"
.format(sys.version_info.major, gdb_python), 79))
print("*" * 79)
except:
pass
check_install()
requirements = [
'scruffington>=0.3.6',
'flask',
'flask_restful',
'blessed',
'pygments',
'requests',
'requests_unixsocket',
'six',
'pysigset',
]
if sys.platform == 'win32':
requirements.append('cursor')
setup(
name="voltron",
version="0.1.7",
author="snare",
author_email="<EMAIL>",
description="A debugger UI",
license="MIT",
keywords="voltron debugger ui gdb lldb vdb "
"vivisect vtrace windbg cdb pykd",
url="https://github.com/snare/voltron",
packages=find_packages(exclude=['tests', 'examples']),
install_requires=requirements,
package_data={'voltron': ['config/*']},
entry_points={
'console_scripts': ['voltron=voltron:main'],
'pygments.lexers': [
'lldb_intel = voltron.lexers:LLDBIntelLexer',
'lldb_att = voltron.lexers:LLDBATTLexer',
'gdb_intel = voltron.lexers:GDBIntelLexer',
'gdb_att = voltron.lexers:GDBATTLexer',
'vdb_intel = voltron.lexers:VDBIntelLexer',
'vdb_att = voltron.lexers:VDBATTLexer',
'windbg_intel = voltron.lexers:WinDbgIntelLexer',
'windbg_att = voltron.lexers:WinDbgATTLexer',
'capstone_intel = voltron.lexers:CapstoneIntelLexer',
],
'pygments.styles': [
'volarized = voltron.styles:VolarizedStyle',
]
},
zip_safe=False
)
|
480059
|
import supriya.synthdefs
import supriya.ugens
__all__ = []
def _build_link_audio_synthdef(channel_count):
r"""
SynthDef("system_link_audio_" ++ i, {
arg out=0, in=16, vol=1, level=1, lag=0.05, doneAction=2;
var env = EnvGate(doneAction:doneAction, curve:'sin') * Lag.kr(vol * level, lag);
Out.ar(out, InFeedback.ar(in, i) * env);
}, [\kr, \kr, \kr, \kr, \kr, \ir]).add;
"""
name = "system_link_audio_{}".format(channel_count)
builder = supriya.synthdefs.SynthDefBuilder(
name=name, out=0, in_=16, gate=1, fade_time=0.02, done_action=2
)
with builder:
start_value = builder["fade_time"] <= 0
envelope = supriya.synthdefs.Envelope(
amplitudes=[start_value, 1.0, 0.0],
durations=[1.0, 1.0],
curves=[supriya.EnvelopeShape.SINE, -supriya.EnvelopeShape.SINE],
release_node=1.0,
)
envelope = supriya.ugens.EnvGen.kr(
done_action=builder["done_action"],
envelope=envelope,
gate=builder["gate"],
time_scale=builder["fade_time"],
)
source = supriya.ugens.InFeedback.ar(
bus=builder["in_"], channel_count=channel_count
)
supriya.ugens.Out.ar(bus=builder["out"], source=source * envelope)
globals()[name] = builder.build()
__all__.append(name)
def _build_link_control_synthdef(channel_count):
r"""
SynthDef("system_link_control_" ++ i, {
arg out=0, in=16, doneAction=2;
var env = EnvGate(doneAction:doneAction, curve:'lin');
Out.kr(out, In.kr(in, i) * env);
}, [\kr, \kr, \ir]).add;
"""
name = "system_link_control_{}".format(channel_count)
builder = supriya.synthdefs.SynthDefBuilder(
name=name, out=0, in_=16, gate=1, fade_time=0.02, done_action=2
)
with builder:
start_value = builder["fade_time"] <= 0
envelope = supriya.synthdefs.Envelope(
amplitudes=[start_value, 1.0, 0.0],
durations=[1.0, 1.0],
curves=supriya.EnvelopeShape.LINEAR,
release_node=1.0,
)
envelope = supriya.ugens.EnvGen.kr(
done_action=builder["done_action"],
envelope=envelope,
gate=builder["gate"],
time_scale=builder["fade_time"],
)
input_ = supriya.ugens.In.kr(bus=builder["in_"], channel_count=channel_count)
supriya.ugens.Out.kr(bus=builder["out"], source=input_ * envelope)
globals()[name] = builder.build()
__all__.append(name)
for i in range(1, 17):
_build_link_audio_synthdef(i)
_build_link_control_synthdef(i)
|
480066
|
from Voicelab.pipeline.Node import Node
import numpy as np
import seaborn as sns
import os
import parselmouth
from parselmouth.praat import call
import matplotlib.pyplot as plt
from Voicelab.toolkits.Voicelab.VoicelabNode import VoicelabNode
#from Voicelab.VoicelabWizard.SpectrumPlotWindow import SpectrumPlotWindow
###################################################################################################
# VISUALIZE VOICE NODE
# WARIO pipeline node for visualizing a voice as a spectrogram.
###################################################################################################
# ARGUMENTS
# 'voice' : sound file generated by parselmouth praat
###################################################################################################
# RETURNS
###################################################################################################
class VisualizeSpectrumNode(VoicelabNode):
def __init__(self, *args, **kwargs):
"""
Args:
*args:
**kwargs:
"""
super().__init__(*args, **kwargs)
self.args = {
"Max Frequency": 5500, # Max frequency, usually highest formant
"Plot LPC Curve": True,
}
###############################################################################################
# process: WARIO hook called once for each voice file.
###############################################################################################
def process(self):
sound = self.args["voice"]
max_freq = self.args["Max Frequency"]
spectrum = sound.to_spectrum()
spectrum_values = spectrum.values[0,:] + 1j * spectrum.values[1,:]
power_spectral_density = 10 * np.log10(2 * abs(spectrum_values)**2 * spectrum.dx / 4e-10)
frequencies = np.array([spectrum.get_frequency_from_bin_number(bin + 1) for bin in range(spectrum.get_number_of_bins())])
# Create subplots so we can overlay the plots
fig, ax = plt.subplots()
# First plot the spectrum as we do normally
ax.plot(frequencies, power_spectral_density, color='black', linewidth=0.25)
plt.xlim(xmin=-4e-10, xmax=5500)
plt.ylim(ymin=0, ymax=np.nanmax(power_spectral_density))
ax.set_xlabel("Frequency bin (Hz)")
ax.set_ylabel("Amplitude (dB) / Frequency(Hz)")
# Plot LPC Curve if user specifies.
if self.args["Plot LPC Curve"]:
# Measure the formants
try:
formant_path_object = call(sound,
"To FormantPath (burg)",
0.0025,
5,
max_freq,
0.025,
50,
0.025,
5)
formant_object = call(formant_path_object, "Extract Formant")
# Create LPC object
lpc = call(formant_object, 'To LPC', max_freq*2)
# Extract Spectral Density (Amplitude) and Frequency Bins from LPC analysis
lpc_spectrum = call(lpc, 'To Spectrum (slice)', 0, 20, 0, 50)
lpc_spectrum_values = lpc_spectrum.values[0,:] + 1j * lpc_spectrum.values[1,:]
lpc_power_spectral_density = 10 * np.log10(2 * abs(lpc_spectrum_values)**2 * lpc_spectrum.dx / 4e-10)
lpc_frequencies = np.array([lpc_spectrum.get_frequency_from_bin_number(bin + 1) for bin in range(lpc_spectrum.get_number_of_bins())])
# plot the LPC curve
ax.plot(lpc_frequencies, lpc_power_spectral_density, color='blue', linewidth=5)
# plt.ylim(ymin=0, ymax=np.nanmax(lpc_power_spectral_density + 10))
ax.set_xlim(xmin=-4e-10, xmax=5500)
ax.set_ylim(ymin=0, ymax=np.nanmax(lpc_power_spectral_density))
except:
print(sound)
print(self.args["file_path"])
fig = plt.figure()
plt.close(fig)
return {"spectrum": fig}
|
480088
|
import asyncio
from typing import Any, Awaitable, Callable, Dict, List, Set, Type
from loguru import logger
from rich.console import Console
from rich.logging import RichHandler
from rich.status import Status
from avilla.core.launch import LaunchComponent, resolve_requirements
from avilla.core.service import Service, TInterface
class LaunchMock:
launch_components: Dict[str, LaunchComponent]
services: List[Service]
rich_console: Console
sigexit: asyncio.Event
def __init__(self, launch_components: Dict[str, LaunchComponent], services: List[Service]):
self.launch_components = {
**launch_components,
**{i.launch_component.id: i.launch_component for i in services},
}
self.services = services
self.rich_console = Console()
def new_launch_component(
self,
id: str,
requirements: Set[str] = None,
mainline: Callable[["LaunchMock"], Awaitable[Any]] = None,
prepare: Callable[["LaunchMock"], Awaitable[Any]] = None,
cleanup: Callable[["LaunchMock"], Awaitable[Any]] = None,
) -> LaunchComponent:
component = LaunchComponent(id, requirements or set(), mainline, prepare, cleanup) # type: ignore
self.launch_components[id] = component
return component
def remove_launch_component(self, id: str):
if id not in self.launch_components:
raise KeyError("id doesn't exist.")
del self.launch_components[id]
def add_service(self, service: Service):
if service in self.services:
raise ValueError("existed service")
self.services.append(service)
launch_component = service.launch_component
self.launch_components[launch_component.id] = launch_component
def remove_service(self, service: Service):
if service not in self.services:
raise ValueError("service doesn't exist.")
self.services.remove(service)
del self.launch_components[service.launch_component.id]
def get_interface(self, interface_type: Type[TInterface]) -> TInterface:
for service in self.services:
if interface_type in service.supported_interface_types:
return service.get_interface(interface_type)
raise ValueError(f"interface type {interface_type} not supported.")
async def launch(self):
logger.configure(
handlers=[
{
"sink": RichHandler(console=self.rich_console, markup=True),
"format": "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | <level>{level: <8}</level> | "
"<cyan>{name}</cyan>: <cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
}
]
)
for service in self.services:
logger.info(f"using service: {service.__class__.__name__}")
logger.info(f"launch components count: {len(self.launch_components)}")
with Status("[orange bold]preparing components...", console=self.rich_console) as status:
for component_layer in resolve_requirements(set(self.launch_components.values())):
tasks = [
asyncio.create_task(component.prepare(self), name=component.id) # type: ignore
for component in component_layer
if component.prepare
]
if tasks:
for task in tasks:
task.add_done_callback(lambda t: status.update(f"{t.get_name()} prepared."))
await asyncio.wait(tasks)
status.update("all launch components prepared.")
await asyncio.sleep(1)
logger.info("[green bold]components prepared, switch to mainlines and block main thread.")
loop = asyncio.get_running_loop()
tasks = [
loop.create_task(component.mainline(self), name=component.id) # type: ignore
for component in self.launch_components.values()
if component.mainline
]
for task in tasks:
task.add_done_callback(lambda t: logger.info(f"mainline {t.get_name()} completed."))
logger.info(f"mainline count: {len(tasks)}")
try:
await asyncio.gather(*tasks)
except asyncio.CancelledError:
logger.info("[red bold]cancelled by user.")
if not self.sigexit.is_set():
self.sigexit.set()
finally:
logger.info("[red bold]all mainlines exited, cleanup start.")
for component_layer in reversed(resolve_requirements(set(self.launch_components.values()))):
tasks = [
asyncio.create_task(component.cleanup(self), name=component.id) # type: ignore
for component in component_layer
if component.cleanup
]
if tasks:
for task in tasks:
task.add_done_callback(lambda t: logger.info(f"{t.get_name()} cleanup finished."))
await asyncio.gather(*tasks)
logger.info("[green bold]cleanup finished.")
logger.warning("[red bold]exiting...")
def launch_blocking(self):
loop = asyncio.new_event_loop()
self.sigexit = asyncio.Event(loop=loop)
launch_task = loop.create_task(self.launch(), name="avilla-launch")
try:
loop.run_until_complete(launch_task)
except KeyboardInterrupt:
self.sigexit.set()
launch_task.cancel()
loop.run_until_complete(launch_task)
|
480090
|
from recordthresher.record_maker import PmhRecordMaker
from recordthresher.util import parseland_parse
class OstiRecordMaker(PmhRecordMaker):
@staticmethod
def _is_specialized_record_maker(pmh_record):
return pmh_record and pmh_record.pmh_id and pmh_record.pmh_id.startswith('oai:osti.gov:')
@classmethod
def _representative_page(cls, pmh_record):
item_id = pmh_record.pmh_id.split(':')[-1]
for repo_page in pmh_record.pages:
if repo_page.url and repo_page.url.endswith(f'osti.gov/biblio/{item_id}'):
return repo_page
return None
@classmethod
def _make_source_specific_record_changes(cls, record, pmh_record, repo_page):
if (pl_parse := parseland_parse(cls._parseland_api_url(repo_page), retry_seconds=10)) is not None:
record.set_authors(pl_parse['authors'])
record.set_published_date(pl_parse['published_date'])
record.genre = pl_parse['genre']
|
480096
|
from app import db
class Rules(db.Model):
__tablename__ = "rules"
id = db.Column(db.Integer, primary_key=True)
workload_name = db.Column(db.String(120), unique=True, nullable=False)
key_name = db.Column(db.String(20))
score = db.Column(db.DECIMAL(20, 2))
infos = db.relationship('Information', backref='rules.id')
def to_json(self):
return {
'id': self.id,
'display_name': self.workload_name,
'key_name': self.key_name,
'score': self.score
}
|
480194
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def data_block_heatmaps(blocks):
"""
Plots a heat map of a bunch of data blocks
"""
num_blocks = len(blocks)
if hasattr(blocks, 'keys'):
block_names = list(blocks.keys())
else:
# block_names = ['block {}'.format(i) for i in range(len(blocks))]
block_names = list(range(len(blocks)))
for k, bn in enumerate(block_names):
plt.subplot(1, num_blocks, k + 1)
sns.heatmap(blocks[bn], xticklabels=False, yticklabels=False, cmap='RdBu')
plt.title('{}'.format(bn))
def jive_full_estimate_heatmaps(full_block_estimates, blocks):
"""
    Plots the full JIVE estimates: X, J, I, E
"""
num_blocks = len(full_block_estimates)
# plt.figure(figsize=[10, num_blocks * 10])
block_names = list(full_block_estimates.keys())
for k, bn in enumerate(block_names):
# grab data
X = blocks[bn]
J = full_block_estimates[bn]['joint']
I = full_block_estimates[bn]['individual']
E = full_block_estimates[bn]['noise']
# observed data
plt.subplot(4, num_blocks, k + 1)
sns.heatmap(X, xticklabels=False, yticklabels=False, cmap='RdBu')
plt.title('{} observed data'.format(bn))
# full joint estimate
plt.subplot(4, num_blocks, k + num_blocks + 1)
sns.heatmap(J, xticklabels=False, yticklabels=False, cmap='RdBu')
plt.title('joint')
# full individual estimate
plt.subplot(4, num_blocks, k + 2 * num_blocks + 1)
sns.heatmap(I, xticklabels=False, yticklabels=False, cmap='RdBu')
plt.title('individual')
# full noise estimate
plt.subplot(4, num_blocks, k + 3 * num_blocks + 1)
sns.heatmap(E, xticklabels=False, yticklabels=False, cmap='RdBu')
plt.title('noise ')
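# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): draws heatmaps for
# two random data blocks; the block names and shapes are made up here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    blocks = {'genes': rng.randn(30, 12), 'images': rng.randn(30, 8)}
    plt.figure(figsize=(8, 4))
    data_block_heatmaps(blocks)
    plt.tight_layout()
    plt.show()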
|
480204
|
import datetime
from optuna.study import StudySummary, StudyDirection
from optuna.trial import FrozenTrial, TrialState
from optuna.distributions import distribution_to_json, json_to_distribution
def serialize_datetime(obj):
if isinstance(obj, datetime.datetime):
return {"__datetime__": True, "as_str": obj.strftime("%Y%m%dT%H:%M:%S.%f")}
return obj
def deserialize_datetime(obj):
if "__datetime__" in obj:
obj = datetime.datetime.strptime(obj["as_str"], "%Y%m%dT%H:%M:%S.%f")
return obj
def serialize_frozentrial(trial):
data = trial.__dict__.copy()
data["state"] = data["state"].name
for attr in [
"trial_id",
"number",
"params",
"user_attrs",
"system_attrs",
"distributions",
"datetime_start",
]:
data[attr] = data.pop(f"_{attr}")
data["distributions"] = {
k: distribution_to_json(v) for k, v in data["distributions"].items()
}
data["datetime_start"] = serialize_datetime(data["datetime_start"])
data["datetime_complete"] = serialize_datetime(data["datetime_complete"])
return data
def deserialize_frozentrial(data):
data["state"] = getattr(TrialState, data["state"])
data["distributions"] = {
k: json_to_distribution(v) for k, v in data["distributions"].items()
}
if data["datetime_start"] is not None:
data["datetime_start"] = deserialize_datetime(data["datetime_start"])
if data["datetime_complete"] is not None:
data["datetime_complete"] = deserialize_datetime(data["datetime_complete"])
    trial = FrozenTrial(**data)
    return trial
def serialize_studysummary(summary):
data = summary.__dict__.copy()
data["study_id"] = data.pop("_study_id")
data["best_trial"] = serialize_frozentrial(data["best_trial"])
data["datetime_start"] = serialize_datetime(data["datetime_start"])
data["direction"] = data["direction"]["name"]
return data
def deserialize_studysummary(data):
data["direction"] = getattr(StudyDirection, data["direction"])
data["best_trial"] = deserialize_frozentrial(data["best_trial"])
data["datetime_start"] = deserialize_datetime(data["datetime_start"])
summary = StudySummary(**data)
return summary
def serialize_studydirection(direction):
return direction.name
def deserialize_studydirection(data):
return getattr(StudyDirection, data)
|
480236
|
from Crypto.Util.number import long_to_bytes
N = 56469405750402193641449232753975279624388972985036568323092258873756801156079913882719631252209538683205353844069168609565141017503581101845476197667784484712057287713526027533597905495298848547839093455328128973319016710733533781180094847568951833393705432945294907000234880317134952746221201465210828955449
P = 23**113 + 1 + 1158518719
Q = N // P
enc = 11104433528952071860984483920122173351342473018268740572598132083816861855404615534742178674185812745207876206939230069251889172817480784782618716608299615251541018034321389516732611030641383571306414414804563863131355221859432899624060128497648444189432635603082478662202695641001726208833663163000227827283
phi = (P - 1) * (Q - 1)
d = pow(0x10001, -1, phi)
print(long_to_bytes(pow(enc, d, N)).decode())
|
480237
|
from io import BufferedIOBase
from ps1_argonaut.BaseDataClasses import BaseDataClass
from ps1_argonaut.configuration import Configuration
class ScriptData(BaseDataClass):
def __init__(self, data: bytes):
self.data = data
@property
def size(self):
return len(self.data)
@classmethod
def parse(cls, data_in: BufferedIOBase, conf: Configuration, *args, **kwargs):
super().parse(data_in, conf)
size = 4 * int.from_bytes(data_in.read(4), 'little')
return cls(data_in.read(size))
|
480248
|
from .searcher import Searcher
class SimulatedAnnealing(Searcher):
def __init__(self, population, params, generation_size=32, stabilization_limit=10):
self.population = population
Searcher.__init__(self, self.population, generation_size, stabilization_limit)
self.params = None
self.set_params(params)
def set_params(self, params):
self.params = params
def run_one(self):
pass
def get_params(self):
pass
|
480249
|
import sys
import time
import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
def ciceksepeti_scraper():
def initialize():
def preference(scrape_input, question):
            while True:  # exits via break once a valid "y"/"n" answer is given
if scrape_input.lower() == "y":
output = True
break
elif scrape_input.lower() == "n":
output = False
break
else:
print("Geçersiz yanıt.")
scrape_input = input(question)
return output
def delay_check(delay):
while type(delay) != int:
try:
delay = int(delay)
except ValueError:
print("Lütfen bir sayı değeri giriniz.")
delay = input("Bekleme süresi: ")
return delay
print("""
---------------------------------------------------------
- Çiçeksepeti Scraper'a hoş geldiniz! -
- Geliştirici: <NAME> -
---------------------------------------------------------
""")
global product_name, file, delay, review_texts, customer_province_texts, customer_name_texts, date_texts, scrape_province, scrape_customer_names, scrape_dates, path
product_name = input("İncelemelerin çekileceği ürün adı: ")
file = input("Oluşturulacak Excel dosyasının adı: ")
file = file + ".xlsx"
delay = delay_check(input("Bekleme süresi(sn): "))
review_texts = []
customer_province_texts = []
customer_name_texts = []
date_texts = []
scrape_province_question = "Müşterinin konumu çekilsin mi(y/n): "
scrape_province_input = input(scrape_province_question)
scrape_province = preference(scrape_province_input, scrape_province_question)
scrape_customer_name_question = "Müşteri isimleri çekilsin mi(y/n): "
scrape_customer_name_input = input(scrape_customer_name_question)
scrape_customer_names = preference(scrape_customer_name_input, scrape_customer_name_question)
scrape_date_question = "İnceleme tarihleri çekilsin mi(y/n): "
scrape_date_input = input(scrape_date_question)
scrape_dates = preference(scrape_date_input, scrape_date_question)
path = "BURAYA CHROMEDRIVER KONUMUNU GİRİNİZ"
def scrape():
try:
print("Chromedriver'a erişiliyor...")
driver = webdriver.Chrome(path)
time.sleep(delay)
print("Chromedriver'a erişildi.")
except WebDriverException:
print("Chromedriver kullanılamıyor.")
sys.exit()
try:
print("Çiçeksepeti adresine gidiliyor...")
driver.get("https://www.ciceksepeti.com")
time.sleep(delay)
driver.maximize_window()
time.sleep(delay)
print("Çiçeksepeti adresine gidildi.")
except:
print("Çiçeksepeti'ne erişilemiyor.")
sys.exit()
try:
print("Ürün aranıyor...")
search_bar = driver.find_element_by_class_name("product-search__input")
search_bar.send_keys(product_name)
search_bar.send_keys(Keys.ENTER)
time.sleep(delay)
product = driver.find_element_by_class_name("products__item-inner")
product.click()
time.sleep(delay)
print("Ürün bulundu.")
except NoSuchElementException:
print("Ürün bulunamadı.")
sys.exit()
see_all_reviews = driver.find_element_by_class_name("comments__all-comments")
see_all_reviews.click()
review_count = driver.find_element_by_class_name("page-comments__product-evaluation__comment-count").text.replace("Yorum", "")
review_count = int(review_count.strip("()"))
if review_count % 20 == 0:
length_of_page = review_count // 20
else:
length_of_page = (review_count // 20) + 1
l = 1
while l <= length_of_page:
print("İncelemeler çekiliyor...")
print("Sayfa: " + str(l))
time.sleep(delay)
reviews = driver.find_elements_by_class_name("page-comments__list__item")
for review in reviews:
review_text = review.find_element_by_class_name("page-comments__list__item__text").text
if review_text == "":
review_text = "BOŞ"
review_texts.append(review_text)
customer_name = review.find_element_by_class_name("page-comments__list__item__name").text
customer_name_texts.append(customer_name)
try:
review = review.text.replace(review_text, "")
except:
pass
review = review.replace(customer_name, "")
review = review.replace(" | ", "").split()
customer_province = review[0]
date = review[1]
customer_province_texts.append(customer_province)
date_texts.append(date)
try:
driver.execute_script("window.scrollTo(0, 2160)")
next_page = driver.find_element_by_class_name("cs-next")
next_page.click()
except:
pass
l += 1
driver.close()
length_list = [review_texts, customer_province_texts, customer_name_texts, date_texts]
limit = map(len, length_list)
limit = min(list(limit))
limit -= 1
review_texts_fin = review_texts[:limit]
df = pd.DataFrame({"Yorum": review_texts_fin})
if scrape_province:
customer_province_texts_fin = customer_province_texts[:limit]
df["Yorum Beğeni Sayısı"] = customer_province_texts_fin
df["Yorum Beğeni Sayısı"] = df["Yorum Beğeni Sayısı"]
if scrape_customer_names:
customer_name_texts_fin = customer_name_texts[:limit]
df["<NAME>"] = customer_name_texts_fin
if scrape_dates:
date_texts_fin = date_texts[:limit]
df["Yorumun Yazıldığı Tarih"] = date_texts_fin
df.to_excel(file, header = True, index = False)
x = "Çektiğiniz veriler " + file + " adlı excel dosyasına kaydedildi."
print(x)
print("""
--------------------------------------------------------------------------
- Projeden memnun kaldıysanız Github üzerinden yıldızlamayı unutmayın. -
- Github Hesabım: ardauzunoglu -
--------------------------------------------------------------------------
""")
initialize()
scrape()
if __name__ == "__main__":
ciceksepeti_scraper()
|
480250
|
import string
def encode(str):
s = string.ascii_letters + " "
dic = {}
enc = ""
for i in range(len(s)):
dic[s[i]] = s[(i + 3) % len(s)]
for ch in str:
enc += dic[ch]
return enc
def decode(str):
s = string.ascii_letters + " "
dic = {}
dec = ""
for i in range(len(s)):
dic[s[i]] = s[(i - 3) % len(s)]
for ch in str:
dec += dic[ch]
return dec
content = input('Enter input: ')
enc = encode(content)
with open('test.txt', 'w') as file:
file.write(enc)
with open('test.txt') as file:
encoded = file.read()
dec = decode(encoded)
print(dec)
|