id (stringlengths 3-8) | content (stringlengths 100-981k)
---|---|
10679
|
import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200
STEP = 0.25
EPS = 0.1
INITIAL_THETA = [9, 14]
def func(x):
return 0.2 * x + 3
def generate_sample(total=TOTAL):
x = 0
while x < total * STEP:
yield func(x) + np.random.uniform(-1, 1) * np.random.uniform(2, 8)
x += STEP
def cost_function(A, Y, theta):
return (Y - A@theta).T@(Y - A@theta)
def batch_descent(A, Y, speed=0.001):
    # keep theta 1-D so that A @ theta broadcasts correctly against Y
    theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
derivatives = [0] * len(theta)
# ---------------------------------------------
for j in range(len(theta)):
summ = 0
for i in range(len(Y)):
summ += (Y[i] - A[i]@theta) * A[i][j]
derivatives[j] = summ
        # Update both parameters simultaneously, as the method requires
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# ---------------------------------------------
current_cost = cost_function(A, Y, theta)
print("Batch cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
def stochastic_descent(A, Y, speed=0.1):
theta = np.array(INITIAL_THETA.copy(), dtype=np.float32)
previous_cost = 10 ** 6
current_cost = cost_function(A, Y, theta)
while np.abs(previous_cost - current_cost) > EPS:
previous_cost = current_cost
# --------------------------------------
# for i in range(len(Y)):
i = np.random.randint(0, len(Y))
derivatives = [0] * len(theta)
for j in range(len(theta)):
derivatives[j] = (Y[i] - A[i]@theta) * A[i][j]
theta[0] += speed * derivatives[0]
theta[1] += speed * derivatives[1]
# --------------------------------------
current_cost = cost_function(A, Y, theta)
print("Stochastic cost:", current_cost)
plt.plot(theta[0], theta[1], 'ro')
return theta
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# Normalization added so the cost-function paraboloid looks nice when plotted
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
import time
start = time.perf_counter()  # time.clock() was removed in Python 3.8
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.perf_counter() - start, theta_stochastic)
start = time.perf_counter()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.perf_counter() - start, theta_batch)
|
10726
|
from guniflask.config import settings
from guniflask.web import blueprint, get_route
@blueprint
class ConfigController:
def __init__(self):
pass
@get_route('/settings/<name>')
def get_setting(self, name):
return {name: settings[name]}
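# Minimal usage sketch (assumption: the guniflask app is served on localhost:8000 with no extra prefix):
#   GET http://localhost:8000/settings/debug  ->  {"debug": <value of settings["debug"]>}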
|
10791
|
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
|
10832
|
import base64
import requests
class RemotePkcs1Signer(object):
""" Client-side Signer subclass, that calls the Signing Service over HTTP to sign things """
# standard headers for request
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
def __init__(self, host, port, key, algorithm="SIGNATURE_RSA_PKCS1_SHA256", keyfile=None):
"""
:param host: host of the remote HTTP service
:param port: port of the remote HTTP service
:param key: see signing_service.py, in our case we use the hash of the related cert to identify the key
:param algorithm: which algorithm to use
:param keyfile: unused, this is a wart :(
"""
self.endpoint = "http://{}:{}/".format(host, port)
self.key = key
self.algorithm = algorithm
def sign(self, data):
        plaintext_base64 = base64.b64encode(data).decode("ascii")  # str, so the JSON payload serializes
plaintext_key = u'0'
payload = {
"key": self.key,
"plaintext": [{
"key": plaintext_key,
"value": plaintext_base64
}],
"algorithm": self.algorithm
}
response = requests.post(self.endpoint,
headers=self.__class__.headers,
json=payload).json()
signature = base64.b64decode(response[u'signature'][plaintext_key])
return signature
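# Minimal usage sketch (host, port and key below are placeholders, not from the original):
#   signer = RemotePkcs1Signer("localhost", 8080, key="ab12cd34")
#   signature = signer.sign(b"data to sign")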
|
10893
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
import seaborn as sns
# generate map
df = pd.read_csv("datasets/euvotes.csv").set_index('country')
palette = tmap(RGBA, sns.cubehelix_palette(11, start=0.2, rot=-0.75))
ranges = [20000000,10000000,5000000,2000000,1000000,500000,200000,100000,0]
def votecolfn(n):
return palette[8 - next(i for i,x in enumerate(ranges) if n >= x)]
def colorfn(c):
if c not in df.index:
return "white" if c in ['Sea', 'Borders'] else "grey"
return votecolfn(int(df.loc[c].votes))
def labelfn(c):
if c not in df.index: return None
dfc = df.loc[c]
label = "{name} '{year}\n({votes:.2g}M)".format(name=dfc.leader.split(" ")[-1], year=dfc.year[2:], votes=int(dfc.votes) / 1000000)
return Image.from_text(label, arial(14, bold=True), align="center", padding=2)
map = map_chart("maps/Europe.png", colorfn, labelfn)
# legend
def box(c):
return Image.new("RGBA", (30, 30), c).place(Image.from_text("", arial(16, bold=True), "black", bg=c))
vote_arr = Image.from_array([
[box(votecolfn(n)), Image.from_text("<0.1M" if n < 100000 else ">{:.2g}M".format(n/1000000), arial(16), padding=(10,0))] for n in ranges
], bg="white", xalign=0)
vote_leg = Image.from_column([Image.from_text("# votes", arial(16, bold=True)), vote_arr], bg="white", xalign=0, padding=(0,5))
note_leg = Image.from_text("Multi-party national elections for executive head or party.", arial(16), max_width=100, bg="white", padding=(0,2))
legend = Image.from_column([vote_leg, note_leg], bg="white", xalign=0, padding=5).pad(1, "black")
chart = map.place(legend, align=(1,0), padding=10)
title = Image.from_column([
Image.from_text("EUROPEAN POPULAR VOTE RECORDS", arial(48, bold=True)),
Image.from_text("candidate or party with the highest absolute popular vote", arial(36))],
bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euvotes.png")
|
10959
|
import sys
import unittest
sys.path.append("../main")
from sshtransport import *
class FakeSocket(object):
def __init__(self):
self.recv_buffer = b""
self.send_buffer = b""
def recv(self, n):
resp = self.recv_buffer[:n]
self.recv_buffer = self.recv_buffer[n:]
return resp
def send(self, x):
self.send_buffer += x
class TestIdentificationString(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"SSH-2.00-SecureMcShellface_1.0\r\n"
idstr = IdentificationString(recvfrom=conn)
self.assertEqual(idstr.protoversion, "2.00")
self.assertEqual(idstr.softwareversion, "SecureMcShellface_1.0")
def test_send(self):
conn = FakeSocket()
idstr = IdentificationString(protoversion="2.00", softwareversion="SecureMcShellface_1.0")
idstr.send(conn)
self.assertEqual(conn.send_buffer, b"SSH-2.00-SecureMcShellface_1.0\r\n")
class TestBinaryPacket(unittest.TestCase):
def test_recv(self):
conn = FakeSocket()
conn.recv_buffer = b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00"
binpkt = BinaryPacket(recvfrom=conn)
self.assertEqual(binpkt.payload, b"Hello World!")
self.assertEqual(binpkt.mac, b"")
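        # Byte layout of the buffer above (SSH binary packet, RFC 4253 section 6):
        #   00 00 00 14     -> packet_length = 20
        #   07              -> padding_length = 7
        #   "Hello World!"  -> 12-byte payload
        #   00 x 7          -> padding (zeros here); no MAC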
def test_send(self):
conn = FakeSocket()
binpkt = BinaryPacket(payload=b"Hello World!")
binpkt.send(conn)
self.assertEqual(conn.send_buffer, b"\x00\x00\x00\x14\x07Hello World!\x00\x00\x00\x00\x00\x00\x00")
|
10982
|
import numpy as np
types = ["int", "float", "double"]
def randi(*args):
return np.random.randint(-10, 10, size=args)
rngs = {"int": randi, "float": np.random.randn, "double": np.random.randn}
embodiments = {
"function": "R.%s(A,B).AllClose(C)",
"op": "(A %s B).AllClose(C)",
"inline_op": "(R = A, R %s B).AllClose(C)",
"inline_function": "( R = A, R.%s(B) ).AllClose(C)"
}
tests = {
'+': ("Addition", "Add", [], []),
'*': ("Multiplication", "Multiply", [], []),
'-': ("Subtraction", "Subtract", [], []),
'/': ("Division", "Divide", ["int"], []),
'dp': ("Dot product", "Dot", [], ["op", "inline_op"])
}
for type in types:
rng = rngs[type]
for op, details in tests.iteritems():
test_title, function, exclude, ignore = details
        if type in exclude:
            continue  # skip this op for excluded types without aborting the remaining ops
iop = op + "="
ifunction = "Inline" + function
names = {
"function": function,
"op": op,
"inline_op": iop,
"inline_function": ifunction
}
n = 7
m = 7
A = rng(n, m)
B = rng(n, m)
if op == "+":
C = A + B
elif op == "/":
C = A / B
elif op == "-":
C = A - B
elif op == "*":
C = A * B
elif op == "dp":
C = np.dot(A, B)
m1 = " ;\n".join([" ".join([str(y) for y in x]) for x in A])
m2 = " ;\n".join([" ".join([str(y) for y in x]) for x in B])
m3 = " ;\n".join([" ".join([str(y) for y in x]) for x in C])
print """
SCENARIO("%s") {
_M<%s> A,B,C,R;
R.Resize( %d, %d );
A = _M<%s>(R\"(\n%s\n)\");
B = _M<%s>(R\"(\n%s\n)\");
C = _M<%s>(R\"(\n%s\n)\");
""" % (test_title + " for " + type, type, n, m, type, m1, type, m2, type, m3)
for method, emb in embodiments.iteritems():
if method in ignore:
continue
name = names[method]
tt = emb % name
print "EXPECT( %s );" % tt
print "};"
print
|
11000
|
import os.path
import tcprepl
import BigWorld
def echo(s):
'''Send string to client'''
if tcprepl.write_client is not None:
tcprepl.write_client(s)
def exec_file(filename, exec_globals=None):
'''
Execute file
Try to find file named `filename` and execute it. If `exec_globals` is
specified it is used as globals-dict in exec context.
'''
if exec_globals is None:
exec_globals = {}
if not os.path.isfile(filename):
filename = BigWorld.wg_resolveFileName(filename)
with open(filename, 'r') as f:
code = f.read()
        exec(code, exec_globals)  # call form works on both Python 2 and 3
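# Usage sketch (the path below is hypothetical):
#   exec_file('scripts/debug_helpers.py')              # run in a fresh globals dict
#   exec_file('scripts/debug_helpers.py', globals())   # run against the caller's globals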
|
11024
|
import os.path as osp
import sys
import subprocess
subprocess.call(['pip', 'install', 'cvbase'])
import cvbase as cvb
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from biupdownsample import biupsample_naive, BiupsampleNaive
from biupdownsample import bidownsample_naive, BidownsampleNaive
feat = torch.randn(2, 64, 2, 2, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 100, 4, 4, requires_grad=True, device='cuda:0').sigmoid().double()
print('Gradcheck for biupsample naive...')
test = gradcheck(BiupsampleNaive(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_naive_forward = 0
time_naive_backward = 0
bar = cvb.ProgressBar(loop_num)
timer = cvb.Timer()
for i in range(loop_num):
x = biupsample_naive(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
print('\nBiupsample naive time forward: '
f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
# ---------------------------------------------------------------
feat = torch.randn(2, 64, 4, 4, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 16, 4, 4, requires_grad=True, device='cuda:0').double()
print('Gradcheck for bidownsample naive...')
test = gradcheck(BidownsampleNaive(4, 1, 1), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
feat = torch.randn(
2, 512, 200, 200, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 100, 100, 100, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_naive_forward = 0
time_naive_backward = 0
bar = cvb.ProgressBar(loop_num)
timer = cvb.Timer()
for i in range(loop_num):
x = bidownsample_naive(feat.clone(), mask.clone(), 10, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
forward_speed = (time_naive_forward + 1e-3) * 1e3 / loop_num
backward_speed = (time_naive_backward + 1e-3) * 1e3 / loop_num
print('\nBidownsample naive time forward: '
f'{forward_speed} ms/iter | time backward: {backward_speed} ms/iter')
|
11047
|
import torch
from torch import nn
from torch.nn import functional as F
class BicubicDownSample(nn.Module):
def bicubic_kernel(self, x, a=-0.50):
"""
This equation is exactly copied from the website below:
https://clouard.users.greyc.fr/Pantheon/experiments/rescaling/index-en.html#bicubic
"""
abs_x = torch.abs(x)
if abs_x <= 1.0:
return (a + 2.0) * torch.pow(abs_x, 3.0) - (a + 3.0) * torch.pow(abs_x, 2.0) + 1
elif 1.0 < abs_x < 2.0:
return a * torch.pow(abs_x, 3) - 5.0 * a * torch.pow(abs_x, 2.0) + 8.0 * a * abs_x - 4.0 * a
else:
return 0.0
def __init__(self, factor=4, cuda=True, padding="reflect"):
super().__init__()
self.factor = factor
size = factor * 4
k = torch.tensor(
[self.bicubic_kernel((i - torch.floor(torch.tensor(size / 2)) + 0.5) / factor) for i in range(size)],
dtype=torch.float32,
)
k = k / torch.sum(k)
# k = torch.einsum('i,j->ij', (k, k))
k1 = torch.reshape(k, shape=(1, 1, size, 1))
self.k1 = torch.cat([k1, k1, k1], dim=0)
k2 = torch.reshape(k, shape=(1, 1, 1, size))
self.k2 = torch.cat([k2, k2, k2], dim=0)
self.cuda = ".cuda" if cuda else ""
self.padding = padding
for param in self.parameters():
param.requires_grad = False
def forward(self, x, nhwc=False, clip_round=False, byte_output=False):
# x = torch.from_numpy(x).type('torch.FloatTensor')
filter_height = self.factor * 4
filter_width = self.factor * 4
stride = self.factor
pad_along_height = max(filter_height - stride, 0)
pad_along_width = max(filter_width - stride, 0)
filters1 = self.k1.type("torch{}.FloatTensor".format(self.cuda))
filters2 = self.k2.type("torch{}.FloatTensor".format(self.cuda))
# compute actual padding values for each side
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
# apply mirror padding
if nhwc:
x = torch.transpose(torch.transpose(x, 2, 3), 1, 2) # NHWC to NCHW
# downscaling performed by 1-d convolution
x = F.pad(x, (0, 0, pad_top, pad_bottom), self.padding)
x = F.conv2d(input=x, weight=filters1, stride=(stride, 1), groups=3)
if clip_round:
x = torch.clamp(torch.round(x), 0.0, 255.0)
x = F.pad(x, (pad_left, pad_right, 0, 0), self.padding)
x = F.conv2d(input=x, weight=filters2, stride=(1, stride), groups=3)
if clip_round:
x = torch.clamp(torch.round(x), 0.0, 255.0)
if nhwc:
x = torch.transpose(torch.transpose(x, 1, 3), 1, 2)
if byte_output:
return x.type("torch.{}.ByteTensor".format(self.cuda))
else:
return x
|
11052
|
import nose2.tools
from typing import Union
from app.util import has_attributes
class SampleClass:
pass
class TestUtil:
@nose2.tools.params(
('SET_VALUE', True),
(None, False),
('NO_ATTRIBUTE', False),
(False, True),
('', True),
(0, True),
)
def test_has_attributes(self, value: Union[bool, int, str, None], ans: bool) -> None:
obj = SampleClass()
if value != 'NO_ATTRIBUTE':
setattr(obj, 'attr', value)
has_attr = has_attributes(obj, 'attr')
assert has_attr is ans
|
11065
|
import os
class config:
host = 'zhangxuanyang.zhangxuanyang.ws2.hh-c.brainpp.cn'
username = 'admin'
port = 5672
exp_name = os.path.dirname(os.path.abspath(__file__))
    exp_name = '-'.join(i for i in exp_name.split(os.path.sep) if i)
test_send_pipe = exp_name + '-test-send_pipe'
test_recv_pipe = exp_name + '-test-recv_pipe'
net_cache = 'model_and_data/checkpoint_epoch_50.pth.tar'
initial_net_cache = 'model_and_data/checkpoint_epoch_0.pth.tar'
layers = 14
edges = 14
model_input_size = (1, 3, 224, 224)
# Candidate operators
blocks_keys = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
op_num = len(blocks_keys)
# Operators encoding
NONE = 0
MAX_POOLING_3x3 = 1
AVG_POOL_3x3 = 2
SKIP_CONNECT = 3
SEP_CONV_3x3 = 4
SEP_CONV_5x5 = 5
DIL_CONV_3x3 = 6
DIL_CONV_5x5 = 7
time_limit=None
#time_limit=0.050
speed_input_shape=[32,3,224,224]
flops_limit=True
max_flops=600*1e6
# max_flops=None
max_epochs=20
select_num = 10
population_num = 50
mutation_num = 25
m_prob = 0.1
crossover_num = 25
momentum = 0.7
eps = 1e-5
# Enumerate all paths of a single cell
paths = [[0, 2, 3, 4, 5], [0, 2, 3, 5], [0, 2, 4, 5], [0, 2, 5], [0, 3, 4, 5], [0, 3, 5], [0, 4, 5], [0, 5],
[1, 2, 3, 4, 5], [1, 2, 3, 5], [1, 2, 4, 5], [1, 2, 5], [1, 3, 4, 5], [1, 3, 5], [1, 4, 5], [1, 5],
[0, 2, 3, 4], [0, 2, 4], [0, 3, 4], [0, 4],
[1, 2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 4],
[0, 2, 3], [0, 3],
[1, 2, 3], [1, 3],
[0, 2],
[1, 2]]
for i in ['exp_name']:
    print('{}: {}'.format(i, getattr(config, i)))
|
11067
|
from btypes.big_endian import *
cstring_sjis = CString('shift-jis')
class Header(Struct):
string_count = uint16
__padding__ = Padding(2)
class Entry(Struct):
string_hash = uint16
string_offset = uint16
def unsigned_to_signed_byte(b):
return b - 0x100 if b & 0x80 else b
def calculate_hash(string):
h = 0
for b in string:
h = (h*3 + unsigned_to_signed_byte(b)) & 0xFFFF
return h
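# Worked example (illustrative): for b"abc" (0x61, 0x62, 0x63, all < 0x80) the hash is
#   ((0*3 + 0x61)*3 + 0x62)*3 + 0x63 = 1266, i.e. calculate_hash(b"abc") == 1266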
def pack(stream, strings):
strings = [string.encode('shift-jis') for string in strings]
header = Header()
header.string_count = len(strings)
Header.pack(stream, header)
offset = Header.sizeof() + Entry.sizeof()*len(strings)
for string in strings:
entry = Entry()
entry.string_hash = calculate_hash(string)
entry.string_offset = offset
Entry.pack(stream, entry)
offset += len(string) + 1
for string in strings:
stream.write(string)
stream.write(b'\0')
def unpack(stream):
base = stream.tell()
header = Header.unpack(stream)
entries = [Entry.unpack(stream) for _ in range(header.string_count)]
strings = []
for entry in entries:
stream.seek(base + entry.string_offset)
strings.append(cstring_sjis.unpack(stream))
return strings
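# Round-trip sketch (assumes the pack/unpack pair above and an in-memory stream):
#   from io import BytesIO
#   buf = BytesIO()
#   pack(buf, ["こんにちは", "さようなら"])
#   buf.seek(0)
#   assert unpack(buf) == ["こんにちは", "さようなら"]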
|
11118
|
r"""Train a neural network to predict feedback for a program string."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import random
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
import torch.utils.data as data
import torch.nn.functional as F
from .models import ProgramRNN
from .utils import AverageMeter, save_checkpoint, merge_args_with_dict
from .datasets import load_dataset
from .config import default_hyperparams
from .rubric_utils.load_params import get_label_params, get_max_seq_len
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dataset', type=str, help='annotated|synthetic')
parser.add_argument('problem_id', type=int, help='1|2|3|4|5|6|7|8')
parser.add_argument('out_dir', type=str, help='where to save outputs')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
merge_args_with_dict(args, default_hyperparams)
device = torch.device('cuda' if args.cuda else 'cpu')
args.max_seq_len = get_max_seq_len(args.problem_id)
label_dim, _, _, _, _ = get_label_params(args.problem_id)
# reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if not os.path.isdir(args.out_dir):
os.makedirs(args.out_dir)
train_dataset = load_dataset( args.dataset, args.problem_id, 'train', vocab=None,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
val_dataset = load_dataset( args.dataset, args.problem_id, 'val', vocab=train_dataset.vocab,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
test_dataset = load_dataset(args.dataset, args.problem_id, 'test', vocab=train_dataset.vocab,
max_seq_len=args.max_seq_len, min_occ=args.min_occ)
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
model = ProgramRNN( args.z_dim, label_dim, train_dataset.vocab_size, embedding_dim=args.embedding_dim,
hidden_dim=args.hidden_dim, num_layers=args.num_layers)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
def train(epoch):
model.train()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
for batch_idx, (seq, length, label, _) in enumerate(train_loader):
assert label is not None
batch_size = len(seq)
seq = seq.to(device)
length = length.to(device)
label = label.to(device)
optimizer.zero_grad()
label_out = model(seq, length)
loss = F.binary_cross_entropy(label_out, label)
loss.backward()
loss_meter.update(loss.item(), batch_size)
optimizer.step()
            acc = np.mean(torch.round(label_out).detach().cpu().numpy() == label.detach().cpu().numpy())
acc_meter.update(acc, batch_size)
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.4f}'.format(
epoch, batch_idx * batch_size, len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss_meter.avg,
acc_meter.avg))
print('====> Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format(
epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
def test(epoch, loader, name='Test'):
model.eval()
loss_meter = AverageMeter()
acc_meter = AverageMeter()
with torch.no_grad():
with tqdm(total=len(loader)) as pbar:
for (seq, length, label, _) in loader:
assert label is not None
batch_size = len(seq)
seq = seq.to(device)
length = length.to(device)
label = label.to(device)
label_out = model(seq, length)
loss = F.binary_cross_entropy(label_out, label)
loss_meter.update(loss.item(), batch_size)
acc = np.mean(torch.round(label_out.cpu()).numpy() == label.cpu().numpy())
acc_meter.update(acc, batch_size)
pbar.update()
print('====> {} Epoch: {}\tLoss: {:.4f}\tAccuracy: {:.4f}'.format(
name, epoch, loss_meter.avg, acc_meter.avg))
return loss_meter.avg, acc_meter.avg
    best_loss = float('inf')  # sys.maxint no longer exists on Python 3
track_train_loss = np.zeros(args.epochs)
track_val_loss = np.zeros(args.epochs)
track_test_loss = np.zeros(args.epochs)
track_train_acc = np.zeros(args.epochs)
track_val_acc = np.zeros(args.epochs)
track_test_acc = np.zeros(args.epochs)
    for epoch in range(1, args.epochs + 1):
train_loss, train_acc = train(epoch)
val_loss, val_acc = test(epoch, val_loader, name='Val')
test_loss, test_acc = test(epoch, test_loader, name='Test')
track_train_loss[epoch - 1] = train_loss
track_val_loss[epoch - 1] = val_loss
track_test_loss[epoch - 1] = test_loss
track_train_acc[epoch - 1] = train_acc
track_val_acc[epoch - 1] = val_acc
track_test_acc[epoch - 1] = test_acc
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
save_checkpoint({
'state_dict': model.state_dict(),
'cmd_line_args': args,
'vocab': train_dataset.vocab,
}, is_best, folder=args.out_dir)
np.save(os.path.join(args.out_dir, 'train_loss.npy'), track_train_loss)
np.save(os.path.join(args.out_dir, 'val_loss.npy'), track_val_loss)
np.save(os.path.join(args.out_dir, 'test_loss.npy'), track_test_loss)
np.save(os.path.join(args.out_dir, 'train_acc.npy'), track_train_acc)
np.save(os.path.join(args.out_dir, 'val_acc.npy'), track_val_acc)
np.save(os.path.join(args.out_dir, 'test_acc.npy'), track_test_acc)
|
11138
|
import os
from os import getcwd
#---------------------------------------------#
#   Be sure to update `classes` before training.
#   The class order must match the txt files under model_data.
#---------------------------------------------#
classes = ["cat", "dog"]
sets = ["train", "test"]
wd = getcwd()
for se in sets:
list_file = open('cls_' + se + '.txt', 'w')
datasets_path = "datasets/" + se
types_name = os.listdir(datasets_path)
for type_name in types_name:
if type_name not in classes:
continue
cls_id = classes.index(type_name)
photos_path = os.path.join(datasets_path, type_name)
photos_name = os.listdir(photos_path)
for photo_name in photos_name:
_, postfix = os.path.splitext(photo_name)
if postfix not in ['.jpg', '.png', '.jpeg']:
continue
list_file.write(str(cls_id) + ";" + '%s/%s'%(wd, os.path.join(photos_path, photo_name)))
list_file.write('\n')
list_file.close()
|
11160
|
from nerwhal.backends.flashtext_backend import FlashtextBackend
from nerwhal.recognizer_bases import FlashtextRecognizer
def test_single_recognizer(embed):
class TestRecognizer(FlashtextRecognizer):
TAG = "XX"
SCORE = 1.0
@property
def keywords(self):
return ["abc", "cde"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizer)
text = "Das ist abc und cde."
ents = backend.run(text)
assert embed(text, ents) == "Das ist XX und XX."
assert ents[0].start_char == 8
assert ents[0].end_char == 11
assert ents[0].tag == "XX"
assert ents[0].text == "abc"
assert ents[0].score == 1.0
assert ents[0].recognizer == "TestRecognizer"
def test_multiple_recognizers(embed):
class TestRecognizerA(FlashtextRecognizer):
TAG = "A"
SCORE = 1.0
@property
def keywords(self):
return ["abc"]
class TestRecognizerB(FlashtextRecognizer):
TAG = "B"
SCORE = 0.5
@property
def keywords(self):
return ["cde"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizerA)
backend.register_recognizer(TestRecognizerB)
text = "Das ist abc und cde."
ents = backend.run(text)
assert embed(text, ents) == "Das ist A und B."
assert ents[0].tag == "A"
assert ents[0].score == 1.0
assert ents[1].tag == "B"
assert ents[1].score == 0.5
def test_overlapping_recognizers(embed):
class TestRecognizerA(FlashtextRecognizer):
TAG = "A"
SCORE = 1.0
@property
def keywords(self):
return ["abc", "cde"]
class TestRecognizerB(FlashtextRecognizer):
TAG = "B"
SCORE = 0.5
@property
def keywords(self):
return ["cde", "fgh"]
backend = FlashtextBackend()
backend.register_recognizer(TestRecognizerA)
backend.register_recognizer(TestRecognizerB)
text = "Das ist cde."
ents = backend.run(text)
# Recognizer B overwrites the keyword "cde"
assert embed(text, ents) == "Das ist B."
|
11161
|
import boto3
import sys
import time
import logging
import getpass
def new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername):
masterPass = getpass.getpass('DBMasterPassword: ')
    if len(masterPass) < 10:
        logging.warning('Password is not at least 10 characters. Please try again')
        time.sleep(5)
        sys.exit(1)  # a bare `exit` was a no-op; abort instead of continuing
try:
rds_instance = boto3.client('rds')
create_instance = rds_instance.create_db_instance(
DBName = dbname,
DBInstanceIdentifier = instanceID,
AllocatedStorage = int(storage),
DBInstanceClass = dbInstancetype,
Engine = 'mysql',
MasterUsername = dbusername,
            MasterUserPassword = masterPass,
MultiAZ = True,
EngineVersion = '5.7.23',
AutoMinorVersionUpgrade = False,
LicenseModel = 'general-public-license',
PubliclyAccessible = False,
Tags = [
{
'Key': 'Name',
'Value' : dbname
}
]
)
print(create_instance)
except Exception as e:
logging.warning('An error has occured')
print(e)
dbname = sys.argv[1]
instanceID = sys.argv[2]
storage = sys.argv[3]
dbInstancetype = sys.argv[4]
dbusername = sys.argv[5]
new_rdsmysql(dbname, instanceID, storage, dbInstancetype, dbusername)
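# Example invocation (script name and every argument below are placeholders):
#   python new_rdsmysql.py mydb mydb-instance 20 db.t3.medium admin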
|
11163
|
import pytest
import cudf
import mock
from cuxfilter.charts.core.non_aggregate.core_non_aggregate import (
BaseNonAggregate,
)
from cuxfilter.dashboard import DashBoard
from cuxfilter import DataFrame
from cuxfilter.layouts import chart_view
class TestCoreNonAggregateChart:
def test_variables(self):
bnac = BaseNonAggregate()
# BaseChart variables
assert bnac.chart_type is None
assert bnac.x is None
assert bnac.y is None
assert bnac.aggregate_fn == "count"
assert bnac.color is None
assert bnac.height == 0
assert bnac.width == 0
assert bnac.add_interaction is True
assert bnac.chart is None
assert bnac.source is None
assert bnac.source_backup is None
assert bnac.data_points == 0
assert bnac._library_specific_params == {}
assert bnac.stride is None
assert bnac.stride_type == int
assert bnac.min_value == 0.0
assert bnac.max_value == 0.0
assert bnac.x_label_map == {}
assert bnac.y_label_map == {}
assert bnac.title == ""
# test chart name setter
bnac.x = "x"
bnac.y = "y"
bnac.chart_type = "test_chart_type"
assert bnac.name == "x_y_count_test_chart_type_"
# BaseNonAggregateChart variables
assert bnac.use_data_tiles is False
assert bnac.reset_event is None
assert bnac.x_range is None
assert bnac.y_range is None
assert bnac.aggregate_col is None
def test_label_mappers(self):
bnac = BaseNonAggregate()
library_specific_params = {
"x_label_map": {"a": 1, "b": 2},
"y_label_map": {"a": 1, "b": 2},
}
bnac.library_specific_params = library_specific_params
assert bnac.x_label_map == {"a": 1, "b": 2}
assert bnac.y_label_map == {"a": 1, "b": 2}
@pytest.mark.parametrize("chart, _chart", [(None, None), (1, 1)])
def test_view(self, chart, _chart):
bnac = BaseNonAggregate()
bnac.chart = chart
bnac.width = 400
bnac.title = "test_title"
assert str(bnac.view()) == str(
chart_view(_chart, width=bnac.width, title=bnac.title)
)
def test_get_selection_geometry_callback(self):
bnac = BaseNonAggregate()
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
assert (
bnac.get_selection_geometry_callback(dashboard).__name__
== "selection_callback"
)
assert callable(type(bnac.get_selection_geometry_callback(dashboard)))
def test_box_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
class evt:
geometry = dict(x0=1, x1=2, y0=3, y1=4, type="rect")
t = bnac.get_selection_geometry_callback(dashboard)
t(evt)
assert self.result.equals(df.query("1<=a<=2 and 3<=b<=4"))
    def test_lasso_selection_callback(self):
bnac = BaseNonAggregate()
bnac.x = "a"
bnac.y = "b"
bnac.chart_type = "temp"
def t_function(data, patch_update=False):
self.result = data
bnac.reload_chart = t_function
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
class evt:
geometry = dict(x=[1, 1, 2], y=[1, 2, 1], type="poly")
final = True
t = bnac.get_selection_geometry_callback(dashboard)
with mock.patch("cuspatial.point_in_polygon") as pip:
pip.return_value = cudf.DataFrame(
{"selection": [True, False, True]}
)
t(evt)
assert pip.called
@pytest.mark.parametrize(
"data, _data",
[
(cudf.DataFrame(), cudf.DataFrame()),
(
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]}),
),
],
)
def test_calculate_source(self, data, _data):
"""
Calculate source just calls to the format_source_data function
which is implemented by chart types inheriting this class.
"""
bnac = BaseNonAggregate()
self.result = None
def t_function(data, patch_update=False):
self.result = data
bnac.format_source_data = t_function
bnac.calculate_source(data)
assert self.result.equals(_data)
@pytest.mark.parametrize(
"x_range, y_range, query, local_dict",
[
(
(1, 2),
(3, 4),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 1, "x_max": 2, "y_min": 3, "y_max": 4},
),
(
(0, 2),
(3, 5),
"@x_min<=x<=@x_max and @y_min<=y<=@y_max",
{"x_min": 0, "x_max": 2, "y_min": 3, "y_max": 5},
),
],
)
def test_compute_query_dict(self, x_range, y_range, query, local_dict):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "x"
bnac.y = "y"
bnac.x_range = x_range
bnac.y_range = y_range
df = cudf.DataFrame({"x": [1, 2, 2], "y": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
bnac.compute_query_dict(
dashboard._query_str_dict, dashboard._query_local_variables_dict
)
bnac_key = (
f"{bnac.x}_{bnac.y}"
f"{'_' + bnac.aggregate_col if bnac.aggregate_col else ''}"
f"_{bnac.aggregate_fn}_{bnac.chart_type}_{bnac.title}"
)
assert dashboard._query_str_dict[bnac_key] == query
for key in local_dict:
assert (
dashboard._query_local_variables_dict[key] == local_dict[key]
)
@pytest.mark.parametrize(
"add_interaction, reset_event, event_1, event_2",
[
(True, None, "selection_callback", None),
(True, "test_event", "selection_callback", "reset_callback"),
(False, "test_event", None, "reset_callback"),
],
)
def test_add_events(self, add_interaction, reset_event, event_1, event_2):
bnac = BaseNonAggregate()
bnac.add_interaction = add_interaction
bnac.reset_event = reset_event
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
self.event_1 = None
self.event_2 = None
def t_func(fn):
self.event_1 = fn.__name__
def t_func1(event, fn):
self.event_2 = fn.__name__
bnac.add_selection_geometry_event = t_func
bnac.add_event = t_func1
bnac.add_events(dashboard)
assert self.event_1 == event_1
assert self.event_2 == event_2
def test_add_reset_event(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac.x_range = (0, 2)
bnac.y_range = (3, 5)
df = cudf.DataFrame({"a": [1, 2, 2], "b": [3, 4, 5]})
dashboard = DashBoard(dataframe=DataFrame.from_dataframe(df))
dashboard._active_view = bnac
def t_func1(event, fn):
fn("event")
bnac.add_event = t_func1
bnac.add_reset_event(dashboard)
assert bnac.x_range is None
assert bnac.y_range is None
def test_query_chart_by_range(self):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
query_tuple = (4, 5)
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# creating a dummy reload chart fn as its not implemented in core
# non aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_range(
active_chart=bnac_1, query_tuple=query_tuple, datatile=None
)
assert self.result.to_string() == " a b\n1 2 4\n2 3 5"
assert self.patch_update is False
@pytest.mark.parametrize(
"new_indices, result",
[
([4, 5], " a b\n1 2 4\n2 3 5"),
([], " a b\n0 1 3\n1 2 4\n2 3 5\n3 4 6"),
([3], " a b\n0 1 3"),
],
)
def test_query_chart_by_indices(self, new_indices, result):
bnac = BaseNonAggregate()
bnac.chart_type = "test"
bnac.x = "a"
bnac_1 = BaseNonAggregate()
bnac_1.chart_type = "test"
bnac_1.x = "b"
df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [3, 4, 5, 6]})
bnac.source = df
self.result = None
self.patch_update = None
def t_func(data, patch_update):
self.result = data
self.patch_update = patch_update
# creating a dummy reload chart fn as its not implemented in core
# non aggregate chart class
bnac.reload_chart = t_func
bnac.query_chart_by_indices(
active_chart=bnac_1,
old_indices=[],
new_indices=new_indices,
datatile=None,
)
assert self.result.to_string() == result
assert self.patch_update is False
|
11175
|
import numpy as np
import ROOT
from dummy_distributions import dummy_pt_eta
counts, test_in1, test_in2 = dummy_pt_eta()
f = ROOT.TFile.Open("samples/testSF2d.root")
sf = f.Get("scalefactors_Tight_Electron")
xmin, xmax = sf.GetXaxis().GetXmin(), sf.GetXaxis().GetXmax()
ymin, ymax = sf.GetYaxis().GetXmin(), sf.GetYaxis().GetXmax()
test_out = np.empty_like(test_in1)
for i, (eta, pt) in enumerate(zip(test_in1, test_in2)):
if xmax <= eta:
eta = xmax - 1.0e-5
elif eta < xmin:
eta = xmin
if ymax <= pt:
pt = ymax - 1.0e-5
elif pt < ymin:
pt = ymin
ib = sf.FindBin(eta, pt)
test_out[i] = sf.GetBinContent(ib)
print(repr(test_out))
|
11184
|
import argparse
import matplotlib.pyplot as plt
import torch
from pytorch_warmup import *
def get_rates(warmup_cls, beta2, max_step):
rates = []
p = torch.nn.Parameter(torch.arange(10, dtype=torch.float32))
optimizer = torch.optim.Adam([{'params': p}], lr=1.0, betas=(0.9, beta2))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda step: 1.0)
warmup_scheduler = warmup_cls(optimizer)
for step in range(1, max_step+1):
rates.append(optimizer.param_groups[0]['lr'])
optimizer.zero_grad()
optimizer.step()
lr_scheduler.step()
warmup_scheduler.dampen()
return rates
parser = argparse.ArgumentParser(description='Warmup schedule')
parser.add_argument('--output', type=str, default='none',
choices=['none', 'png', 'pdf'],
help='Output file type (default: none)')
args = parser.parse_args()
beta2 = 0.999
max_step = 3000
plt.plot(range(1, max_step+1), get_rates(RAdamWarmup, beta2, max_step), label='RAdam')
plt.plot(range(1, max_step+1), get_rates(UntunedExponentialWarmup, beta2, max_step), label='Untuned Exponential')
plt.plot(range(1, max_step+1), get_rates(UntunedLinearWarmup, beta2, max_step), label='Untuned Linear')
plt.legend()
plt.title('Warmup Schedule')
plt.xlabel('Iteration')
plt.ylabel(r'Warmup factor $(\omega_t)$')
if args.output == 'none':
plt.show()
else:
plt.savefig(f'warmup_schedule.{args.output}')
|
11199
|
from __future__ import absolute_import
from io import BytesIO
import zstd
from .base import BaseCompressor, BaseDecompressor
from ..protocol import CompressionMethod, CompressionMethodByte
from ..reader import read_binary_uint32
from ..writer import write_binary_uint32, write_binary_uint8
class Compressor(BaseCompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def get_compressed_data(self, extra_header_size):
rv = BytesIO()
data = self.get_value()
compressed = zstd.compress(data)
header_size = extra_header_size + 4 + 4 # sizes
write_binary_uint32(header_size + len(compressed), rv)
write_binary_uint32(len(data), rv)
rv.write(compressed)
return rv.getvalue()
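    # Bytes produced above: [uint32: block size including the caller's extra header]
    #                       [uint32: uncompressed data size] [zstd-compressed payload]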
class Decompressor(BaseDecompressor):
method = CompressionMethod.ZSTD
method_byte = CompressionMethodByte.ZSTD
def get_decompressed_data(self, method_byte, compressed_hash,
extra_header_size):
size_with_header = read_binary_uint32(self.stream)
compressed_size = size_with_header - extra_header_size - 4
compressed = BytesIO(self.stream.read(compressed_size))
block_check = BytesIO()
write_binary_uint8(method_byte, block_check)
write_binary_uint32(size_with_header, block_check)
block_check.write(compressed.getvalue())
self.check_hash(block_check.getvalue(), compressed_hash)
compressed = compressed.read(compressed_size - 4)
return zstd.decompress(compressed)
|
11206
|
import numpy as np
import sys
import os
sys.path.append('utils/')
from config import *
from utils import *
sys.path.append(pycaffe_dir)
import time
import pdb
import random
import pickle as pkl
import caffe
from multiprocessing import Pool
from threading import Thread
import random
import h5py
import itertools
import math
import re
glove_dim = 300
glove_path = 'data/glove.6B.%dd.txt' %glove_dim
#glove_path = 'data/glove_debug_path.txt' #for debugging
if glove_path == 'data/glove_debug_path.txt':
print "continue?"
pdb.set_trace()
possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)]
for i in itertools.combinations(range(6), 2):
possible_segments.append(i)
length_prep_word = 40
length_prep_character = 250
vocab_file = 'data/vocab_glove_complete.txt'
def word_tokenize(s):
sent = s.lower()
sent = re.sub('[^A-Za-z0-9\s]+',' ', sent)
return sent.split()
def sentences_to_words(sentences):
words = []
for s in sentences:
words.extend(word_tokenize(str(s.lower())))
return words
class glove_embedding(object):
''' Creates glove embedding object
'''
def __init__(self, glove_file=glove_path):
glove_txt = open(glove_file).readlines()
glove_txt = [g.strip() for g in glove_txt]
glove_vector = [g.split(' ') for g in glove_txt]
glove_words = [g[0] for g in glove_vector]
glove_vecs = [g[1:] for g in glove_vector]
glove_array = np.zeros((glove_dim, len(glove_words)))
glove_dict = {}
for i, w in enumerate(glove_words): glove_dict[w] = i
for i, vec in enumerate(glove_vecs):
glove_array[:,i] = np.array(vec)
self.glove_array = glove_array
self.glove_dict = glove_dict
self.glove_words = glove_words
class zero_language_vector(object):
def __init__(self, data):
self.dim = glove_dim
def get_vector_dim(self):
return self.dim
def get_vocab_size(self):
return 0
def preprocess(self, data):
embedding = np.zeros((self.get_vector_dim(),))
for d in data:
d['language_input'] = embedding
d['gt'] = (d['gt'][0], d['gt'][1])
return data
class recurrent_language(object):
def get_vocab_size(self):
return len(self.vocab_dict.keys())
def preprocess_sentence(self, words):
vector_dim = self.get_vector_dim()
sentence_mat = np.zeros((len(words), vector_dim))
count_words = 0
for i, w in enumerate(words):
try:
sentence_mat[count_words,:] = self.vocab_dict[w]
count_words += 1
except:
if '<unk>' in self.vocab_dict.keys():
sentence_mat[count_words,:] = self.vocab_dict['<unk>']
count_words += 1
else:
pass
sentence_mat = sentence_mat[:count_words]
return sentence_mat
def preprocess(self, data):
for d in data:
words = sentences_to_words([d['description']])
            d['language_input'] = self.preprocess_sentence(words)  # per-sentence helper, not the list-level preprocess
return data
class recurrent_word(recurrent_language):
def __init__(self, data):
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' not in vocab:
vocab.append('<unk>')
vocab_dict = {}
for i, word in enumerate(vocab):
vocab_dict[word] = i
self.vocab_dict = vocab_dict
def get_vector_dim(self):
return 1
class recurrent_embedding(recurrent_language):
def read_embedding(self):
print "Reading glove embedding"
embedding = glove_embedding(glove_path)
self.embedding = embedding
def get_vector_dim(self):
return glove_dim
def __init__(self, data):
self.read_embedding()
embedding = self.embedding
vector_dim = self.get_vector_dim()
self.data = data
self.data = data
vocab = open(vocab_file).readlines()
vocab = [v.strip() for v in vocab]
if '<unk>' in vocab:
vocab.remove('<unk>') #don't have an <unk> vector. Alternatively, could map to random vector...
vocab_dict = {}
for i, word in enumerate(vocab):
try:
vocab_dict[word] = embedding.glove_array[:,embedding.glove_dict[word]]
except:
print "%s not in glove embedding" %word
self.vocab_dict = vocab_dict
def preprocess(self, data):
vector_dim = self.get_vector_dim()
for d in data:
d['language_input'] = sentences_to_words([d['description']])
return data
def get_vocab_dict(self):
return self.vocab_dict
#Methods for extracting visual features
def feature_process_base(start, end, features):
return np.mean(features[start:end+1,:], axis = 0)
def feature_process_norm(start, end, features):
base_feature = np.mean(features[start:end+1,:], axis = 0)
return base_feature/(np.linalg.norm(base_feature) + 0.00001)
def feature_process_context(start, end, features):
feature_dim = features.shape[1]
full_feature = np.zeros((feature_dim*2,))
if np.sum(features[5,:]) > 0:
full_feature[:feature_dim] = feature_process_norm(0,6, features)
else:
full_feature[:feature_dim] = feature_process_norm(0,5, features)
full_feature[feature_dim:feature_dim*2] = feature_process_norm(start, end, features)
return full_feature
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
class extractData(object):
""" General class to extract data.
"""
def increment(self):
#uses iteration, batch_size, data_list, and num_data to extract next batch identifiers
next_batch = [None]*self.batch_size
if self.iteration + self.batch_size >= self.num_data:
next_batch[:self.num_data-self.iteration] = self.data_list[self.iteration:]
next_batch[self.num_data-self.iteration:] = self.data_list[:self.batch_size -(self.num_data-self.iteration)]
random.shuffle(self.data_list)
self.iteration = self.num_data - self.iteration
else:
next_batch = self.data_list[self.iteration:self.iteration+self.batch_size]
self.iteration += self.batch_size
assert self.iteration > -1
assert len(next_batch) == self.batch_size
return next_batch
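    # Worked example of the wrap-around above: with num_data=10, batch_size=4 and
    # iteration=8, the batch takes elements 8 and 9 plus the first two elements of the
    # list, then the list is reshuffled and iteration is reset to 2.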
class extractLanguageFeatures(extractData):
def __init__(self, dataset, params, result=None):
self.data_list = range(len(dataset))
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.vocab_dict = params['vocab_dict']
self.batch_size = params['batch_size']
self.num_glove_centroids = self.vocab_dict.values()[0].shape[0]
self.T = params['sentence_length']
if isinstance(result, dict):
self.result = result
self.query_key = params['query_key']
self.cont_key = params['cont_key']
self.top_keys = [self.query_key, self.cont_key]
self.top_shapes = [(self.T, self.batch_size, self.num_glove_centroids),
(self.T, self.batch_size)]
else:
print "Will only be able to run in test mode"
def get_features(self, query):
feature = np.zeros((self.T, self.num_glove_centroids))
cont = np.zeros((self.T,))
len_query = min(len(query), self.T)
if len_query < len(query):
query = query[:len_query]
for count_word, word in enumerate(query):
try:
feature[-(len_query)+count_word,:] = self.vocab_dict[word]
except:
feature[-(len_query)+count_word,:] = np.zeros((glove_dim,))
cont[-(len_query-1):] = 1
assert np.sum(feature[:-len_query,:]) == 0
return feature, cont
def get_data_test(self, data):
query = data['language_input']
return self.get_features(query)
def get_data(self, next_batch):
data = self.dataset
query_mat = np.zeros((self.T, self.batch_size, self.num_glove_centroids))
cont = np.zeros((self.T, self.batch_size))
for i, nb in enumerate(next_batch):
query = data[nb]['language_input']
query_mat[:,i,:], cont[:,i] = self.get_features(query)
self.result[self.query_key] = query_mat
self.result[self.cont_key] = cont
class extractVisualFeatures(extractData):
def __init__(self, dataset, params, result):
self.data_list = range(len(dataset))
self.feature_process_algo = params['feature_process']
self.loc_feature = params['loc_feature']
self.num_data = len(self.data_list)
self.dataset = dataset
self.iteration = 0
self.loc = params['loc_feature']
loss_type = params['loss_type']
assert loss_type in ['triplet', 'inter', 'intra']
self.inter = False
self.intra = False
if loss_type in ['triplet', 'inter']:
self.inter = True
if loss_type in ['triplet', 'intra']:
self.intra = True
self.batch_size = params['batch_size']
self.num_glove_centroids = params['num_glove_centroids']
features_h5py = h5py.File(params['features'])
features = {}
for key in features_h5py.keys():
features[key] = np.array(features_h5py[key])
features_h5py.close()
self.features = features
assert self.feature_process_algo in feature_process_dict.keys()
self.feature_process = feature_process_dict[self.feature_process_algo]
self.feature_dim = self.feature_process(0,0,self.features[self.dataset[0]['video']]).shape[-1]
self.result = result
self.feature_key_p = params['feature_key_p']
self.feature_time_stamp_p = params['feature_time_stamp_p']
self.feature_time_stamp_n = params['feature_time_stamp_n']
self.top_keys = [self.feature_key_p, self.feature_time_stamp_p, self.feature_time_stamp_n]
self.top_shapes = [(self.batch_size, self.feature_dim),
(self.batch_size, 2),
(self.batch_size,2)]
if self.inter:
self.feature_key_inter = 'features_inter'
self.top_keys.append(self.feature_key_inter)
self.top_shapes.append((self.batch_size, self.feature_dim))
if self.intra:
self.feature_key_intra = 'features_intra'
self.top_keys.append(self.feature_key_intra)
self.top_shapes.append((self.batch_size, self.feature_dim))
self.possible_annotations = possible_segments
def get_data_test(self, d):
video_feats = self.features[d['video']]
features = np.zeros((len(self.possible_annotations), self.feature_dim))
loc_feats = np.zeros((len(self.possible_annotations), 2))
for i, p in enumerate(self.possible_annotations):
features[i,:] = self.feature_process(p[0], p[1], video_feats)
loc_feats[i,:] = [p[0]/6., p[1]/6.]
return features, loc_feats
def get_data(self, next_batch):
feature_process = self.feature_process
data = self.dataset
features_p = np.zeros((self.batch_size, self.feature_dim))
if self.inter: features_inter = np.zeros((self.batch_size, self.feature_dim))
if self.intra: features_intra = np.zeros((self.batch_size, self.feature_dim))
features_time_stamp_p = np.zeros((self.batch_size, 2))
features_time_stamp_n = np.zeros((self.batch_size, 2))
for i, nb in enumerate(next_batch):
rint = random.randint(0,len(data[nb]['times'])-1)
gt_s = data[nb]['times'][rint][0]
gt_e = data[nb]['times'][rint][1]
possible_n = list(set(self.possible_annotations) - set(((gt_s,gt_e),)))
random.shuffle(possible_n)
n = possible_n[0]
assert n != (gt_s, gt_e)
video = data[nb]['video']
feats = self.features[video]
if self.inter:
other_video = data[nb]['video']
while (other_video == video):
other_video_index = int(random.random()*len(data))
other_video = data[other_video_index]['video']
feats_inter = self.features[other_video]
features_p[i,:] = feature_process(gt_s, gt_e, feats)
if self.intra:
features_intra[i,:] = feature_process(n[0], n[1], feats)
if self.inter:
try:
features_inter[i,:] = feature_process(gt_s, gt_e, feats_inter)
except:
pdb.set_trace()
if self.loc:
features_time_stamp_p[i,0] = gt_s/6.
features_time_stamp_p[i,1] = gt_e/6.
features_time_stamp_n[i,0] = n[0]/6.
features_time_stamp_n[i,1] = n[1]/6.
else:
features_time_stamp_p[i,0] = 0
features_time_stamp_p[i,1] = 0
features_time_stamp_n[i,0] = 0
features_time_stamp_n[i,1] = 0
assert not math.isnan(np.mean(self.features[data[nb]['video']][n[0]:n[1]+1,:]))
assert not math.isnan(np.mean(self.features[data[nb]['video']][gt_s:gt_e+1,:]))
self.result[self.feature_key_p] = features_p
self.result[self.feature_time_stamp_p] = features_time_stamp_p
self.result[self.feature_time_stamp_n] = features_time_stamp_n
if self.inter:
self.result[self.feature_key_inter] = features_inter
if self.intra:
self.result[self.feature_key_intra] = features_intra
class batchAdvancer(object):
def __init__(self, extractors):
self.extractors = extractors
self.increment_extractor = extractors[0]
def __call__(self):
#The batch advancer just calls each extractor
next_batch = self.increment_extractor.increment()
for e in self.extractors:
e.get_data(next_batch)
class python_data_layer(caffe.Layer):
""" General class to extract data.
"""
def setup(self, bottom, top):
random.seed(10)
self.params = eval(self.param_str)
params = self.params
assert 'top_names' in params.keys()
#set up prefetching
self.thread_result = {}
self.thread = None
self.setup_extractors()
self.batch_advancer = batchAdvancer(self.data_extractors)
shape_dict = {}
self.top_names = []
for de in self.data_extractors:
for top_name, top_shape in zip(de.top_keys, de.top_shapes):
shape_dict[top_name] = top_shape
self.top_names.append((params['top_names'].index(top_name), top_name))
self.dispatch_worker()
self.top_shapes = [shape_dict[tn[1]] for tn in self.top_names]
print 'Outputs:', self.top_names
if len(top) != len(self.top_names):
raise Exception('Incorrect number of outputs (expected %d, got %d)' %
(len(self.top_names), len(top)))
self.join_worker()
#for top_index, name in enumerate(self.top_names.keys()):
top_count = 0
for top_index, name in self.top_names:
shape = self.top_shapes[top_count]
print 'Top name %s has shape %s.' %(name, shape)
top[top_index].reshape(*shape)
top_count += 1
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
if self.thread is not None:
self.join_worker()
for top_index, name in self.top_names:
top[top_index].data[...] = self.thread_result[name]
self.dispatch_worker()
def dispatch_worker(self):
assert self.thread is None
self.thread = Thread(target=self.batch_advancer)
self.thread.start()
def join_worker(self):
assert self.thread is not None
self.thread.join()
self.thread = None
    def backward(self, top, propagate_down, bottom):
pass
feature_process_dict = {'feature_process_base': feature_process_base,
'feature_process_norm': feature_process_norm,
'feature_process_context': feature_process_context,
}
language_feature_process_dict = {'zero_language': zero_language_vector,
'recurrent_embedding': recurrent_embedding}
class dataLayer_ExtractPairedLanguageVision(python_data_layer):
def setup_extractors(self):
assert 'top_names' in self.params.keys()
assert 'descriptions' in self.params.keys()
assert 'features' in self.params.keys()
if 'batch_size' not in self.params.keys(): self.params['batch_size'] = 120
self.params['query_key'] = 'query'
self.params['feature_key_n'] = 'features_n'
self.params['feature_key_p'] = 'features_p'
self.params['feature_key_t'] = 'features_t'
self.params['feature_time_stamp_p'] = 'features_time_stamp_p'
self.params['feature_time_stamp_n'] = 'features_time_stamp_n'
self.params['cont_key'] = 'cont'
language_extractor_fcn = extractLanguageFeatures
visual_extractor_fcn = extractVisualFeatures
language_process = recurrent_embedding
data_orig = read_json(self.params['descriptions'])
random.shuffle(data_orig)
language_processor = language_process(data_orig)
data = language_processor.preprocess(data_orig)
self.params['vocab_dict'] = language_processor.vocab_dict
num_glove_centroids = language_processor.get_vector_dim()
self.params['num_glove_centroids'] = num_glove_centroids
visual_feature_extractor = visual_extractor_fcn(data, self.params, self.thread_result)
textual_feature_extractor = language_extractor_fcn(data, self.params, self.thread_result)
self.data_extractors = [visual_feature_extractor, textual_feature_extractor]
|
11216
|
from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import re
cfgs = [
FuzzConfig(job="SYSCONFIG40", device="LIFCL-40", sv="../shared/empty_40.v",
tiles=["CIB_R0C75:EFB_0", "CIB_R0C72:BANKREF0", "CIB_R0C77:EFB_1_OSC", "CIB_R0C79:EFB_2",
"CIB_R0C81:I2C_EFB_3", "CIB_R0C85:PMU", "CIB_R0C87:MIB_CNR_32_FAFD", "CIB_R1C87:IREF_P33", "CIB_R2C87:POR"]),
FuzzConfig(job="SYSCONFIG17", device="LIFCL-17", sv="../shared/empty_17.v",
tiles=["CIB_R1C75:IREF_15K", "CIB_R0C75:PPT_QOUT_15K", "CIB_R0C74:PVTCAL33_15K", "CIB_R0C73:POR_15K",
"CIB_R0C72:I2C_15K", "CIB_R0C71:OSC_15K", "CIB_R0C70:PMU_15K", "CIB_R0C66:EFB_15K"])
]
def main():
for cfg in cfgs:
cfg.setup()
empty = cfg.build_design(cfg.sv, {})
cfg.sv = "../shared/empty_presyn_40.v"
cfg.struct_mode = False
def get_substs(k, v):
return dict(sysconfig="{}={}".format(k, v))
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.MASTER_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"],
lambda x: get_substs("MASTER_SPI_PORT", x), False,
assume_zero_base=True,
desc="status of master SPI port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_SPI_PORT", ["DISABLE", "SERIAL", "DUAL", "QUAD"],
lambda x: get_substs("SLAVE_SPI_PORT", x), False,
assume_zero_base=True,
desc="status of slave SPI port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I2C_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("SLAVE_I2C_PORT", x), False,
assume_zero_base=True,
desc="status of slave I2C port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.SLAVE_I3C_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("SLAVE_I3C_PORT", x), False,
assume_zero_base=True,
desc="status of slave I3C port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.JTAG_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("JTAG_PORT", x), False,
assume_zero_base=True,
desc="status of JTAG port after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.DONE_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("DONE_PORT", x), False,
assume_zero_base=True,
desc="use DONE output after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.INITN_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("INITN_PORT", x), False,
assume_zero_base=True,
desc="use INITN input after configuration")
nonrouting.fuzz_enum_setting(cfg, empty, "SYSCONFIG.PROGRAMN_PORT", ["DISABLE", "ENABLE"],
lambda x: get_substs("PROGRAMN_PORT", x), False,
assume_zero_base=True,
desc="use PROGRAMN input after configuration")
if __name__ == "__main__":
main()
|
11240
|
import unittest
import numpy as np
from astroNN.lamost import wavelength_solution, pseudo_continuum
class LamostToolsTestCase(unittest.TestCase):
def test_wavelength_solution(self):
wavelength_solution()
wavelength_solution(dr=5)
self.assertRaises(ValueError, wavelength_solution, dr=1)
def test_norm(self):
pseudo_continuum(np.ones(3909), np.ones(3909))
if __name__ == '__main__':
unittest.main()
|
11302
|
import os
import json
STOPWORDS_JSON_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir, "corpora/stopwords.json"
)
with open(STOPWORDS_JSON_PATH, "r", encoding="utf-8") as f:
STOPWORD = json.load(f)["stopwords"]
|
11393
|
import FWCore.ParameterSet.Config as cms
from RecoEgamma.ElectronIdentification.Identification.mvaElectronID_tools import *
# Documentation of the MVA
# https://twiki.cern.ch/twiki/bin/viewauth/CMS/MultivariateElectronIdentificationRun2
# https://rembserj.web.cern.ch/rembserj/notes/Electron_MVA_ID_2017_documentation
#
# In this file we define the locations of the MVA weights, cuts on the MVA values
# for specific working points, and configure those cuts in VID
#
# The tag is an extra string attached to the names of the products
# such as ValueMaps that needs to distinguish cases when the same MVA estimator
# class is used with different tuning/weights
mvaTag = "Fall17NoIsoV1"
# There are 6 categories in this MVA. They have to be configured in this strict order
# (cuts and weight files order):
# 0 EB1 (eta<0.8) pt 5-10 GeV | pt < ptSplit && |eta| < ebSplit
# 1 EB2 (eta>=0.8) pt 5-10 GeV | pt < ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 2 EE pt 5-10 GeV | pt < ptSplit && |eta| >= ebeeSplit
# 3 EB1 (eta<0.8) pt 10-inf GeV | pt >= ptSplit && |eta| < ebSplit
# 4 EB2 (eta>=0.8) pt 10-inf GeV | pt >= ptSplit && |eta| >= ebSplit && |eta| < ebeeSplit
# 5 EE pt 10-inf GeV | pt >= ptSplit && |eta| >= ebeeSplit
mvaFall17WeightFiles_V1 = cms.vstring(
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_5_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB1_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EB2_10_2017_puinfo_BDT.weights.xml.gz",
"RecoEgamma/ElectronIdentification/data/Fall17/EIDmva_EE_10_2017_puinfo_BDT.weights.xml.gz"
)
## The working point for this MVA that is expected to have about 90% signal efficiency
# WPs tuned to give about 90% and 80% signal efficiency for electrons from Drell-Yan with pT > 25 GeV
# The working point for the low pt categories is just taken over from the high pt
idName90 = "mvaEleID-Fall17-noIso-V1-wp90"
MVA_WP90 = EleMVA_WP(
idName = idName90, mvaTag = mvaTag,
cutCategory0 = "0.9165112826974601 - exp(-pt / 2.7381703555094217) * 1.03549199648109", # EB1 low pt
cutCategory1 = "0.8655738322220173 - exp(-pt / 2.4027944652597073) * 0.7975615613282494", # EB2 low pt
cutCategory2 = "-3016.035055227131 - exp(-pt / -52140.61856333602) * -3016.3029387236506", # EE low pt
cutCategory3 = "0.9616542816132922 - exp(-pt / 8.757943837889817) * 3.1390200321591206", # EB1
cutCategory4 = "0.9319258011430132 - exp(-pt / 8.846057432565809) * 3.5985063793347787", # EB2
cutCategory5 = "0.8899260780999244 - exp(-pt / 10.124234115859881) * 4.352791250718547", # EE
)
idName80 = "mvaEleID-Fall17-noIso-V1-wp80"
MVA_WP80 = EleMVA_WP(
idName = idName80, mvaTag = mvaTag,
cutCategory0 = "0.9530240956555949 - exp(-pt / 2.7591425841003647) * 0.4669644718545271", # EB1 low pt
cutCategory1 = "0.9336564763961019 - exp(-pt / 2.709276284272272) * 0.33512286599215946", # EB2 low pt
cutCategory2 = "0.9313133688365339 - exp(-pt / 1.5821934800715558) * 3.8889462619659265", # EE low pt
cutCategory3 = "0.9825268564943458 - exp(-pt / 8.702601455860762) * 1.1974861596609097", # EB1
cutCategory4 = "0.9727509457929913 - exp(-pt / 8.179525631018565) * 1.7111755094657688", # EB2
cutCategory5 = "0.9562619539540145 - exp(-pt / 8.109845366281608) * 3.013927699126942", # EE
)
### WP tuned for HZZ analysis with very high efficiency (about 98%)
# The working points were found by requiring the same signal efficiencies in
# each category as for the Spring 16 HZZ ID
# (see RecoEgamma/ElectronIdentification/python/Identification/mvaElectronID_Spring16_HZZ_V1_cff.py)
idNamewpLoose = "mvaEleID-Fall17-noIso-V1-wpLoose"
MVA_WPLoose = EleMVA_WP(
idName = idNamewpLoose, mvaTag = mvaTag,
cutCategory0 = "-0.13285867293779202", # EB1 low pt
cutCategory1 = "-0.31765300958836074", # EB2 low pt
cutCategory2 = "-0.0799205914718861" , # EE low pt
cutCategory3 = "-0.856871961305474" , # EB1
cutCategory4 = "-0.8107642141584835" , # EB2
cutCategory5 = "-0.7179265933023059" # EE
)
#
# Finally, set up VID configuration for all cuts
#
# Create the PSet that will be fed to the MVA value map producer
mvaEleID_Fall17_noIso_V1_producer_config = cms.PSet(
mvaName = cms.string(mvaClassName),
mvaTag = cms.string(mvaTag),
# Category parameters
nCategories = cms.int32(6),
categoryCuts = cms.vstring(*EleMVA_6CategoriesCuts),
# Weight files and variable definitions
weightFileNames = mvaFall17WeightFiles_V1,
variableDefinition = cms.string("RecoEgamma/ElectronIdentification/data/ElectronMVAEstimatorRun2Fall17V1Variables.txt")
)
# Create the VPset's for VID cuts
mvaEleID_Fall17_V1_wpLoose = configureVIDMVAEleID( MVA_WPLoose )
mvaEleID_Fall17_V1_wp90 = configureVIDMVAEleID( MVA_WP90 )
mvaEleID_Fall17_V1_wp80 = configureVIDMVAEleID( MVA_WP80 )
mvaEleID_Fall17_V1_wpLoose.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp90.isPOGApproved = cms.untracked.bool(True)
mvaEleID_Fall17_V1_wp80.isPOGApproved = cms.untracked.bool(True)
|
11404
|
from typing import Union, List
import pexpect
from figcli.utils.utils import Utils
import sys
class FiggyAction:
"""
Actions prevent cyclic dependencies and are designed to leverage FiggyCli for cleanup steps when running inside
of tests.
"""
def __init__(self, command, extra_args=""):
self.c = Utils.default_colors()
self.command = command
self.extra_args = extra_args
self._child = self.spawn(command)
print(f"{self.c.fg_yl}Executing action: {self._child.args}{self.c.rs}")
self._child.logfile = sys.stdout
self._child.delaybeforesend = .5
def spawn(self, command: str):
return pexpect.spawn(command, timeout=10, encoding='utf-8')
def expect_multiple(self, regexes: List[str]):
print(f'Expecting: {regexes}')
return self._child.expect(regexes)
def expect(self, regex: Union[List[str], str], retry=True):
print(f'Expecting: {regex}')
expect_list = [regex] + [pexpect.TIMEOUT] if isinstance(regex, str) else regex + [pexpect.TIMEOUT]
result = self._child.expect(expect_list)
if result == len(expect_list) - 1 and retry:
self.alert(f"EXPECT FAILED: {regex} initiating retry!")
self._child = self.spawn(self.command)
return self.expect(regex, retry=False)
else:
return result
def sendline(self, line: str):
print(f'Sending: {line}')
self._child.sendline(line)
def wait(self):
self._child.wait()
def alert(self, msg: str):
print(f"{self.c.fg_yl}-----------------------------------------{self.c.rs}")
print(f"{self.c.fg_rd} ALERT: {msg}{self.c.rs}")
print(f"{self.c.fg_yl}-----------------------------------------{self.c.rs}")
|
11411
|
import contextlib
import os
import tempfile
import warnings
from enum import Enum
import mip
class IISFinderAlgorithm(Enum):
DELETION_FILTER = 1
ADDITIVE_ALGORITHM = 2
class SubRelaxationInfeasible(Exception):
pass
class NonRelaxableModel(Exception):
pass
class ConflictFinder:
"""This class groups some IIS (Irreducible Infeasible Set) search algorithms"""
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
def find_iis(
self, method: IISFinderAlgorithm = IISFinderAlgorithm.DELETION_FILTER,
cbc_verbose: bool = False
) -> mip.ConstrList:
"""main method to find an IIS, this method is just a grouping of the other implementations
Args:
model (mip.Model): Infeasible model where to find the IIS
method (str, optional): name of the method to use ["deletion-filter", "additive_algorithm"]. Defaults to 'deletion-filter".
Returns:
mip.ConstrList: IIS constraint list
"""
# assert ,is not because time limit
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
if method == IISFinderAlgorithm.DELETION_FILTER:
return self.deletion_filter()
if method == IISFinderAlgorithm.ADDITIVE_ALGORITHM:
return self.additive_algorithm()
def deletion_filter(self) -> mip.ConstrList:
"""deletion filter algorithm for search an IIS
Args:
model (mip.Model): Infeasible model
Returns:
mip.ConstrList: IIS
"""
# 1. create a model with all constraints but one
aux_model = self.model.copy()
aux_model.objective = 1
aux_model.emphasis = 1 # feasibility
aux_model.preprocess = 1 # -1 automatic, 0 off, 1 on.
print("starting deletion_filter algorithm")
for inc_crt in self.model.constrs:
aux_model_inc_crt = aux_model.constr_by_name(
inc_crt.name
) # find constraint by name
aux_model.remove(aux_model_inc_crt) # temporarily remove inc_crt
aux_model.optimize()
status = aux_model.status
# 2. test feasibility, if feasible, return dropped constraint to the set
# 2.1 else removed it permanently
# print('status {}'.format(status))
if status == mip.OptimizationStatus.INFEASIBLE:
# print("removing permanently {}".format(inc_crt.name))
continue
elif status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
aux_model.add_constr(
inc_crt.expr, name=inc_crt.name, priority=inc_crt.priority
)
iis = aux_model.constrs
return iis
def additive_algorithm(self) -> mip.ConstrList:
"""Additive algorithm to find an IIS
Returns:
mip.ConstrList: IIS
"""
# Create some aux models to test feasibility of the set of constraints
aux_model_testing = mip.Model()
for var in self.model.vars:
aux_model_testing.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
# obj= var.obj,
# column=var.column #!! libc++abi.dylib: terminating with uncaught exception of type CoinError
)
aux_model_testing.objective = 1
aux_model_testing.emphasis = 1 # feasibility
aux_model_testing.preprocess = 1 # -1 automatic, 0 off, 1 on.
aux_model_iis = (
aux_model_testing.copy()
) # a second aux model to test feasibility of the incumbent iis
# algorithm start
all_constraints = self.model.constrs
testing_crt_set = mip.ConstrList(model=aux_model_testing) # T
iis = mip.ConstrList(model=aux_model_iis) # I
while True:
for crt in all_constraints:
testing_crt_set.add(crt.expr, name=crt.name)
aux_model_testing.constrs = testing_crt_set
aux_model_testing.optimize()
if aux_model_testing.status == mip.OptimizationStatus.INFEASIBLE:
iis.add(crt.expr, name=crt.name)
aux_model_iis.constrs = iis
aux_model_iis.optimize()
if aux_model_iis.status == mip.OptimizationStatus.INFEASIBLE:
return iis
elif aux_model_iis.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
testing_crt_set = mip.ConstrList(model=aux_model_testing)
for crt in iis: # basically this loop is for set T=I // aux_model_iis = iis.copy()
testing_crt_set.add(crt.expr, name=crt.name)
break
def deletion_filter_milp_ir_lc_bd(self) -> mip.ConstrList:
"""Integer deletion filter algorithm (milp_ir_lc_bd)
Raises:
NotImplementedError: [description]
Returns:
mip.ConstrList: [description]
"""
raise NotImplementedError("WIP")
# major constraint sets definition
t_aux_model = mip.Model(name="t_auxiliary_model")
iis_aux_model = mip.Model(name="t_auxiliary_model")
linear_constraints = mip.ConstrList(
model=t_aux_model
) # all the linear model constraints
variable_bound_constraints = mip.ConstrList(
model=t_aux_model
) # all the linear model constraints related specifically to the variable bounds
integer_varlist_crt = mip.VarList(
model=t_aux_model
) # the variable-nature constraints for vars of type Integer/Binary
# fill the above sets with the constraints
for crt in self.model.constrs:
linear_constraints.add(crt.expr, name=crt.name)
for var in self.model.vars:
if var.lb != -mip.INF:
variable_bound_constraints.add(
var >= var.lb, name="{}_lb_crt".format(var.name)
)
if var.ub != mip.INF:
variable_bound_constraints.add(
var <= var.ub, name="{}_ub_crt".format(var.name)
)
for var in self.model.vars:
if var.var_type in (mip.INTEGER, mip.BINARY):
integer_varlist_crt.add(var)
status = "IIS"
# add all LC,BD to the incumbent, T= LC + BD
for var in self.model.vars: # add all variables as if they were CONTINUOUS and unbounded (these aspects are handled separately)
iis_aux_model.add_var(
name=var.name, lb=-mip.INF, ub=mip.INF, var_type=mip.CONTINUOUS
)
for crt in linear_constraints + variable_bound_constraints:
iis_aux_model.add_constr(crt.expr, name=crt.name, priority=crt.priority)
iis_aux_model.optimize()
if iis_aux_model.status == mip.OptimizationStatus.INFEASIBLE:
# if infeasible here, the infeasibility is already present in the purely linear (LP) part
return self.deletion_filter() # (STEP 2)
# add all the integer constraints to the model
iis_aux_model.vars.remove(
[var for var in integer_varlist_crt]
) # remove all integer variables
for var in integer_varlist_crt:
iis_aux_model.add_var(
name=var.name,
lb=-mip.INF,
ub=mip.INF,
var_type=var.var_type, # this will add the var with his original type
)
# filter IR constraints that create infeasibility (STEP 1)
for var in integer_varlist_crt:
iis_aux_model.vars.remove(iis_aux_model.var_by_name(var.name))
iis_aux_model.add_var(
name=var.name,
lb=-mip.INF,
ub=mip.INF,
var_type=mip.CONTINUOUS, # relax the integer constraint over var
)
iis_aux_model.optimize()
# if infeasible then update incumbent T = T-{ir_var_crt}
# else continue
# STEP 2 filter lc constraints
# STEP 3 filter BD constraints
# return IS o IIS
def deletion_filter_milp_lc_ir_bd(self) -> mip.ConstrList:
raise NotImplementedError # TODO
class ConflictRelaxer:
def __init__(self, model: mip.Model):
if model.status == mip.OptimizationStatus.LOADED:
print("model not runned yet, checking if feasible or not")
model.emphasis = 1 # feasibility
model.preprocess = 1 # -1 automatic, 0 off, 1 on.
model.optimize()
assert (
model.status == mip.OptimizationStatus.INFEASIBLE
), "model is not linear infeasible"
self.model = model
self.iis_num_iterations = 0
self.iis_iterations = []
self.relax_slack_iterations = []
@property
def slack_by_crt(self) -> dict:
answ = {}
for slack_dict_iter in self.relax_slack_iterations:
for crt_name in slack_dict_iter.keys():
if crt_name in answ.keys():
answ[crt_name] += slack_dict_iter[crt_name]
else:
answ[crt_name] = slack_dict_iter[crt_name]
return answ
def hierarchy_relaxer(
self,
relaxer_objective: str = "min_abs_slack_val",
default_priority: mip.constants.ConstraintPriority = mip.constants.ConstraintPriority.MANDATORY,
cbc_verbose: bool = False
) -> mip.Model:
"""hierarchy relaxer algorithm, it's gonna find a IIS and then relax it using the objective function defined (`relaxer_objective`) and then update the model
with the relaxed constraints. This process runs until there's not more IIS on the model.
Args:
relaxer_objective (str, optional): objective function of the relaxer model (IIS relaxer model). Defaults to 'min_abs_slack_val'.
default_priority (ConstraintPriority, optional): If a constraint does not have a supported substring priority in the name, it will assign a default priority.
Defaults to ConstraintPriority.MANDATORY.
Raises:
NonRelaxableModel: [description]
Returns:
mip.Model: relaxed model
"""
relaxed_model = self.model.copy()
relaxed_model._status = self.model._status # TODO solve this in a different way
# map unmapped constraints to the default priority
for crt in relaxed_model.constrs:
if not crt.priority:
crt.priority = default_priority
iis_it = 0
iis_dict = {}
taboo_list_iis = []
cf = ConflictFinder(relaxed_model)
while True:
# 1. find iis
iis = cf.find_iis(IISFinderAlgorithm.DELETION_FILTER)
self.iis_iterations.append([crt.name for crt in iis]) # track iteration
self.iis_num_iterations += 1 # track iteration
iis_priority_set = set([crt.priority for crt in iis])
# check if "relaxable" model mapping
if iis_priority_set == set([mip.constants.ConstraintPriority.MANDATORY]):
raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
# 2. relax iis
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
for level, relaxing_level in enumerate(sorted(iis_priority_set, key=lambda x: x.value)):
# highest case (raise exception)
if relaxing_level == mip.constants.ConstraintPriority.MANDATORY:
raise NonRelaxableModel("Infeasible model, is not possible to relax MANDATORY constraints")
try:
slack_dict = self.relax_iis(iis, relaxer_objective=relaxer_objective, lowest_priority=relaxing_level)
except SubRelaxationInfeasible as e:
warnings.warn(f'Warning: relaxing more than one priority level, currently on level {level}: {relaxing_level}')
continue
else:
# relaxable IIS; continue with the next iteration
break
self.relax_slack_iterations.append(slack_dict)
# 3. add the slack variables to the original problem
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
relaxed_model = self.relax_constraints(relaxed_model, slack_dict)
# 4. check if feasible
relaxed_model.emphasis = 1 # feasibility
with contextlib.nullcontext() if cbc_verbose else ignore_output() as iow:
relaxed_model.optimize()
if relaxed_model.status in [
mip.OptimizationStatus.FEASIBLE,
mip.OptimizationStatus.OPTIMAL,
]:
print("finished relaxation process !")
break
else:
print(
"relaxed the current IIS, still infeasible, searching for a new IIS to relax"
)
print("relaxed constraints {0}".format(list(slack_dict.keys())))
iis_it += 1
# print(f'found iis_{iis_it} = {[crt.name for crt in iis]}')
iis_dict[iis_it] = {}
iis_crt = [crt.name for crt in iis]
iis_dict[iis_it]['iis'] = [{'name': crt.name, 'priority': str(crt.priority).split('.')[1]} for crt in iis]
print(f'found iis_{iis_it} : len = {len(iis_crt)} in_taboo = {(iis_crt in taboo_list_iis)}')
taboo_list_iis.append(iis_crt)
iis_dict[iis_it]['slack'] = slack_dict
return relaxed_model
@classmethod
def relax_iis(
cls, iis: mip.ConstrList, relaxer_objective: str = "min_abs_slack_val", lowest_priority: 'mip.constants.ConstraintPriority' = None
) -> dict:
"""This function is the sub module that finds the optimum relaxation for an IIS, given a crt priority mapping and a objective function
Args:
iis (mip.ConstrList): IIS constraint list
relaxer_objective (str, optional): objective function to use when relaxing. Defaults to 'min_abs_slack_val'.
Returns:
dict: a slack variable dictionary with the value of the {constraint_name:slack.value} pair to be added to each constraint in order to make the IIS feasible
"""
relax_iis_model = mip.Model()
if lowest_priority is None:
lowest_priority = min([crt.priority for crt in iis])
to_relax_crts = [crt for crt in iis if crt.priority == lowest_priority or crt.priority < lowest_priority]
# create a model that only contains the iis
slack_vars = {}
abs_slack_vars = {}
abs_slack_cod_vars = {}
for crt in iis:
# print(crt.name, crt.priority)
for var in crt._Constr__model.vars:
relax_iis_model.add_var(
name=var.name,
lb=var.lb,
ub=var.ub,
var_type=var.var_type,
obj=var.obj,
)
if crt in to_relax_crts:
# if this is a to-be-relaxed constraint
slack_vars[crt.name] = relax_iis_model.add_var(
name="{0}__{1}".format(crt.name, "slack"),
lb=-mip.INF,
ub=mip.INF,
var_type=mip.CONTINUOUS,
)
abs_slack_vars[crt.name] = relax_iis_model.add_var(
name="{0}_abs".format(slack_vars[crt.name].name),
lb=0,
ub=mip.INF,
var_type=mip.CONTINUOUS,
)
# add relaxed constraint to model
relax_expr = crt.expr + slack_vars[crt.name]
relax_iis_model.add_constr(
relax_expr,
name="{}_relaxed".format(crt.name),
)
# add abs(slack) variable encoding constraints
relax_iis_model.add_constr(
abs_slack_vars[crt.name] >= slack_vars[crt.name],
name="{}_positive_min_bound".format(slack_vars[crt.name].name),
)
relax_iis_model.add_constr(
abs_slack_vars[crt.name] >= -slack_vars[crt.name],
name="{}_negative_min_bound".format(slack_vars[crt.name].name),
)
else:
# if not to be relaxed, add it directly to the model
relax_iis_model.add_constr(
crt.expr, name="{}_original".format(crt.name), priority=crt.priority
)
# find the min abs value of the slack variables
relax_iis_model.objective = mip.xsum(list(abs_slack_vars.values()))
relax_iis_model.sense = mip.MINIMIZE
relax_iis_model.optimize()
if relax_iis_model.status == mip.OptimizationStatus.INFEASIBLE:
raise SubRelaxationInfeasible("sub relaxation model infeasible, this could mean that in the IIS the mandatory constraints are infeasible sometimes")
slack_dict = {}
for crt in to_relax_crts:
slack_dict[crt.name] = slack_vars[crt.name].x
return slack_dict
@classmethod
def relax_constraints(cls, relaxed_model: mip.Model, slack_dict: dict) -> mip.Model:
"""this method creates a modification of the model `relaxed_model` where all the constraints in the slack_dict are
modified in order to add the slack values to make the IIS disappear
Args:
relaxed_model (mip.Model): model to relax
slack_dict (dict): pairs {constraint_name: slack_var.value}
Returns:
mip.Model: a modification of the original model where all the constraints are modified with the slack values
"""
for crt_name in slack_dict.keys():
crt_original = relaxed_model.constr_by_name(crt_name)
relax_expr = crt_original.expr + slack_dict[crt_name]
relaxed_model.add_constr(
relax_expr, name=crt_original.name, priority=crt_original.priority
)
relaxed_model.remove(crt_original) # remove constraint
return relaxed_model
@contextlib.contextmanager
def ignore_output():
with tempfile.TemporaryFile() as f:
orig_std_out = os.dup(1)
os.dup2(f.fileno(), 1)
yield # hand control to the body of the with block
os.dup2(orig_std_out, 1)
os.close(orig_std_out)
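# Hedged usage sketch (not part of the original module): builds a deliberately infeasible
# two-constraint model and asks ConflictFinder for an IIS. Variable and constraint names are
# illustrative; assumes a python-mip version that exposes the constraint priorities this
# module already relies on.
def _example_find_iis():
    m = mip.Model()
    x = m.add_var(name="x", lb=0, ub=10)
    m.add_constr(x >= 5, name="c_low")
    m.add_constr(x <= 2, name="c_high")    # clashes with c_low
    cf = ConflictFinder(m)                 # optimizes and asserts the model is infeasible
    iis = cf.find_iis(IISFinderAlgorithm.DELETION_FILTER)
    print([crt.name for crt in iis])       # expected to contain the clashing constraints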
|
11416
|
from channels import Group
# websocket.connect
def ws_add(message):
Group("chat").add(message.reply_channel)
# websocket.receive
def ws_message(message):
Group("chat").send({
"text": message.content['text'],
})
# websocket.disconnect
def ws_disconnect(message):
Group("chat").discard(message.reply_channel)
|
11469
|
import re
regex = re.compile('[^a-zA-Z]')
def score_word(word, corpus=None):
word = regex.sub('', word) # leave only alpha
score = 0
consec_bonus = 2
for i, letter in enumerate(word):
if letter.islower():
continue
if i > 0 and word[i-1].isupper():
score += consec_bonus
if i == 0:
score += 10
elif (i == 1) or (i == len(word)-1):
score += 3
else:
score += 1
if (i >= 1) and (corpus is not None) and (word[i:].lower() in corpus):
score += len(word[i:])-1
return score
def score_acronym(capitalized_acronym, corpus=None):
"""
For each capitalized letter in the acronym:
* 10 points if first letter in a word (with exception of first letter)
* 3 point if second or last letter in a word
* 1 point otherwise
* N bonus points if begins an N-length valid sub-word
(ex: multiVariable -> 8 bonus points)
* 2 bonus points if immediately following a capitalized letter
"""
return sum([score_word(word, corpus=corpus) for word in capitalized_acronym.split(' ')]) - 10
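# Hedged usage sketch (not part of the original module): a quick sanity check of the scoring
# rules documented above, run with no corpus supplied.
if __name__ == "__main__":
    # Each word's leading capital scores 10 and the trailing -10 discounts the acronym's
    # first letter, so "Machine Learning" scores 10 when corpus is None.
    print(score_word("Machine"))              # 10
    print(score_acronym("Machine Learning"))  # 10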
|
11474
|
from pypy.module.cpyext.test.test_api import BaseApiTest
class TestIterator(BaseApiTest):
def test_check_iter(self, space, api):
assert api.PyIter_Check(space.iter(space.wrap("a")))
assert api.PyIter_Check(space.iter(space.newlist([])))
assert not api.PyIter_Check(space.w_type)
assert not api.PyIter_Check(space.wrap(2))
def test_getIter(self, space, api):
w_iter = api.PyObject_GetIter(space.wrap([1, 2, 3]))
assert space.unwrap(api.PyIter_Next(w_iter)) == 1
assert space.unwrap(api.PyIter_Next(w_iter)) == 2
assert space.unwrap(api.PyIter_Next(w_iter)) == 3
assert api.PyIter_Next(w_iter) is None
assert not api.PyErr_Occurred()
def test_iternext_error(self, space, api):
assert api.PyIter_Next(space.w_None) is None
assert api.PyErr_Occurred() is space.w_TypeError
api.PyErr_Clear()
|
11483
|
import argparse
import json
import numpy as np
import os
import torch
import data_
import models
import utils
from matplotlib import cm, pyplot as plt
from tensorboardX import SummaryWriter
from torch import optim
from torch.utils import data
from tqdm import tqdm
from utils import io
parser = argparse.ArgumentParser()
# CUDA
parser.add_argument('--use_gpu', type=bool, default=True, help='Whether to use GPU.')
# data
parser.add_argument('--dataset_name', type=str, default='spirals',
help='Name of dataset to use.')
parser.add_argument('--n_data_points', default=int(1e6),
help='Number of unique data points in training set.')
parser.add_argument('--batch_size', type=int, default=256,
help='Size of batch used for training.')
parser.add_argument('--num_workers', type=int, default=0,
help='Number of workers used in data loaders.')
# MADE
parser.add_argument('--n_residual_blocks_made', default=4,
help='Number of residual blocks in MADE.')
parser.add_argument('--hidden_dim_made', default=256,
help='Dimensionality of hidden layers in MADE.')
parser.add_argument('--activation_made', default='relu',
help='Activation function for MADE.')
parser.add_argument('--use_batch_norm_made', default=False,
help='Whether to use batch norm in MADE.')
parser.add_argument('--dropout_probability_made', default=None,
help='Dropout probability for MADE.')
# energy net
parser.add_argument('--context_dim', default=64,
help='Dimensionality of context vector.')
parser.add_argument('--n_residual_blocks_energy_net', default=4,
help='Number of residual blocks in energy net.')
parser.add_argument('--hidden_dim_energy_net', default=128,
help='Dimensionality of hidden layers in energy net.')
parser.add_argument('--energy_upper_bound', default=0,
help='Max value for output of energy net.')
parser.add_argument('--activation_energy_net', default='relu',
help='Activation function for energy net.')
parser.add_argument('--use_batch_norm_energy_net', default=False,
help='Whether to use batch norm in energy net.')
parser.add_argument('--dropout_probability_energy_net', default=None,
help='Dropout probability for energy net.')
parser.add_argument('--scale_activation', default='softplus',
help='Activation to use for scales in proposal mixture components.')
parser.add_argument('--apply_context_activation', default=False,
help='Whether to apply activation to context vector.')
# proposal
parser.add_argument('--n_mixture_components', default=10,
help='Number of proposal mixture components (per dimension).')
parser.add_argument('--proposal_component', default='gaussian',
help='Type of location-scale family distribution '
'to use in proposal mixture.')
parser.add_argument('--n_proposal_samples_per_input', default=20,
help='Number of proposal samples used to estimate '
'normalizing constant during training.')
parser.add_argument('--n_proposal_samples_per_input_validation', default=100,
help='Number of proposal samples used to estimate '
'normalizing constant during validation.')
parser.add_argument('--mixture_component_min_scale', default=1e-3,
help='Minimum scale for proposal mixture components.')
# optimization
parser.add_argument('--learning_rate', default=5e-4,
help='Learning rate for Adam.')
parser.add_argument('--n_total_steps', default=int(4e5),
help='Number of total training steps.')
parser.add_argument('--alpha_warm_up_steps', default=5000,
help='Number of warm-up steps for AEM density.')
parser.add_argument('--hard_alpha_warm_up', default=True,
help='Whether to use a hard warm up for alpha')
# logging and checkpoints
parser.add_argument('--monitor_interval', default=100,
help='Interval in steps at which to report training stats.')
parser.add_argument('--visualize_interval', default=10000,
help='Interval in steps at which to visualize the estimated density on the test grid.')
parser.add_argument('--save_interval', default=10000,
help='Interval in steps at which to save model.')
# reproducibility
parser.add_argument('--seed', default=1638128,
help='Random seed for PyTorch and NumPy.')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.use_gpu and torch.cuda.is_available():
device = torch.device('cuda')
torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
device = torch.device('cpu')
# Generate data
train_dataset = data_.load_plane_dataset(args.dataset_name, args.n_data_points)
train_loader = data_.InfiniteLoader(
dataset=train_dataset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_epochs=None
)
# Generate test grid data
n_points_per_axis = 512
bounds = np.array([
[-4, 4],
[-4, 4]
])
grid_dataset = data_.TestGridDataset(n_points_per_axis=n_points_per_axis, bounds=bounds)
grid_loader = data.DataLoader(
dataset=grid_dataset,
batch_size=1000,
drop_last=False
)
# various dimensions for autoregressive and energy nets
dim = 2 # D
output_dim_multiplier = args.context_dim + 3 * args.n_mixture_components # K + 3M
# Create MADE
made = models.ResidualMADE(
input_dim=dim,
n_residual_blocks=args.n_residual_blocks_made,
hidden_dim=args.hidden_dim_made,
output_dim_multiplier=output_dim_multiplier,
conditional=False,
activation=utils.parse_activation(args.activation_made),
use_batch_norm=args.use_batch_norm_made,
dropout_probability=args.dropout_probability_made
).to(device)
# create energy net
energy_net = models.ResidualEnergyNet(
input_dim=(args.context_dim + 1),
n_residual_blocks=args.n_residual_blocks_energy_net,
hidden_dim=args.hidden_dim_energy_net,
energy_upper_bound=args.energy_upper_bound,
activation=utils.parse_activation(args.activation_energy_net),
use_batch_norm=args.use_batch_norm_energy_net,
dropout_probability=args.dropout_probability_energy_net
).to(device)
# create AEM
aem = models.AEM(
autoregressive_net=made,
energy_net=energy_net,
context_dim=args.context_dim,
n_proposal_mixture_components=args.n_mixture_components,
proposal_component_family=args.proposal_component,
n_proposal_samples_per_input=args.n_proposal_samples_per_input,
mixture_component_min_scale=args.mixture_component_min_scale,
apply_context_activation=args.apply_context_activation
).to(device)
# make optimizer
parameters = list(made.parameters()) + list(energy_net.parameters())
optimizer = optim.Adam(parameters, lr=args.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_total_steps)
# create summary writer and write to log directory
timestamp = io.get_timestamp()
log_dir = os.path.join(io.get_log_root(), args.dataset_name, timestamp)
writer = SummaryWriter(log_dir=log_dir)
filename = os.path.join(log_dir, 'config.json')
with open(filename, 'w') as file:
json.dump(vars(args), file)
# Training loop
tbar = tqdm(range(args.n_total_steps))
alpha = 0
for step in tbar:
aem.train()
scheduler.step(step)
optimizer.zero_grad()
# training step
batch = next(train_loader).to(device)
log_density, log_proposal_density, _, log_normalizer = aem(batch)
mean_log_density = torch.mean(log_density)
mean_log_proposal_density = torch.mean(log_proposal_density)
mean_log_normalizer = torch.mean(log_normalizer)
if args.alpha_warm_up_steps is not None:
if args.hard_alpha_warm_up:
alpha = float(step > args.alpha_warm_up_steps)
else:
alpha = torch.Tensor([min(step / args.alpha_warm_up_steps, 1)])
loss = - (alpha * mean_log_density + mean_log_proposal_density)
else:
loss = - (mean_log_density + mean_log_proposal_density)
loss.backward()
optimizer.step()
if (step + 1) % args.monitor_interval == 0:
s = 'Loss: {:.4f}, log p: {:.4f}, log q: {:.4f}'.format(
loss.item(),
mean_log_density.item(),
mean_log_proposal_density.item()
)
tbar.set_description(s)
# write summaries
summaries = {
'loss': loss.detach(),
'log-prob-aem': mean_log_density.detach(),
'log-prob-proposal': mean_log_proposal_density.detach(),
'log-normalizer': mean_log_normalizer.detach(),
'learning-rate': torch.Tensor(scheduler.get_lr()),
}
for summary, value in summaries.items():
writer.add_scalar(tag=summary, scalar_value=value, global_step=step)
if (step + 1) % args.visualize_interval == 0:
# Plotting
aem.eval()
aem.set_n_proposal_samples_per_input_validation(
args.n_proposal_samples_per_input_validation)
log_density_np = []
log_proposal_density_np = []
for batch in grid_loader:
batch = batch.to(device)
log_density, log_proposal_density, unnormalized_log_density, log_normalizer = aem(
batch)
log_density_np = np.concatenate((
log_density_np, utils.tensor2numpy(log_density)
))
log_proposal_density_np = np.concatenate((
log_proposal_density_np, utils.tensor2numpy(log_proposal_density)
))
fig, axs = plt.subplots(1, 3, figsize=(7.5, 2.5))
axs[0].hist2d(train_dataset.data[:, 0], train_dataset.data[:, 1],
range=bounds, bins=512, cmap=cm.viridis, rasterized=False)
axs[0].set_xticks([])
axs[0].set_yticks([])
axs[1].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_proposal_density_np).reshape(grid_dataset.X.shape))
axs[1].set_xlim(bounds[0])
axs[1].set_ylim(bounds[1])
axs[1].set_xticks([])
axs[1].set_yticks([])
axs[2].pcolormesh(grid_dataset.X, grid_dataset.Y,
np.exp(log_density_np).reshape(grid_dataset.X.shape))
axs[2].set_xlim(bounds[0])
axs[2].set_ylim(bounds[1])
axs[2].set_xticks([])
axs[2].set_yticks([])
plt.tight_layout()
path = os.path.join(io.get_output_root(), 'pytorch', '{}.png'.format(args.dataset_name))
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
plt.savefig(path, dpi=300)
writer.add_figure(tag='test-grid', figure=fig, global_step=step)
plt.close()
if (step + 1) % args.save_interval == 0:
path = os.path.join(io.get_checkpoint_root(), 'pytorch', '{}.t'.format(args.dataset_name))
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
torch.save(aem.state_dict(), path)
path = os.path.join(io.get_checkpoint_root(),
'pytorch', '{}-{}.t'.format(args.dataset_name, timestamp))
torch.save(aem.state_dict(), path)
|
11502
|
import logging
import os.path as path
from typing import List, Optional, Tuple
from psychopy import core, visual
from bcipy.acquisition.marker_writer import NullMarkerWriter, MarkerWriter
from bcipy.helpers.task import SPACE_CHAR
from bcipy.helpers.stimuli import resize_image
from bcipy.helpers.system_utils import get_screen_resolution
from bcipy.helpers.triggers import TriggerCallback, _calibration_trigger
class RSVPDisplay(object):
"""RSVP Display Object for inquiry Presentation.
Animates a inquiry in RSVP. Mode should be determined outside.
"""
def __init__(
self,
window: visual.Window,
static_clock,
experiment_clock: core.Clock,
marker_writer: Optional[MarkerWriter] = None,
task_color: List[str] = ['white'],
task_font: str = 'Times',
task_pos: Tuple[float, float] = (-.8, .9),
task_height: float = 0.2,
task_text: str = '1/100',
info_color: List[str] = ['white'],
info_text: List[str] = ['Information Text'],
info_font: List[str] = ['Times'],
info_pos=[(.8, .9)],
info_height=[0.2],
stim_font='Times',
stim_pos=(-.8, .9),
stim_height=0.2,
stim_inquiry: List[str] = ['a'] * 10,
stim_colors: List[str] = ['white'] * 10,
stim_timing: List[float] = [1] * 10,
is_txt_stim: bool = True,
static_time: float = .05,
trigger_type: str = 'image',
space_char: SPACE_CHAR = SPACE_CHAR):
"""Initialize RSVP window parameters and objects.
PARAMETERS:
----------
# Experiment
window(visual.Window): PsychoPy Window
static_clock: static period object used to time stimulus drawing (see self.staticPeriod)
experiment_clock(core.Clock): Clock used to timestamp experiment
marker_writer(MarkerWriter): object used to write triggers to
the daq stream.
# Task
task_color(list[string]): Color of the task string. Shares the
length of the task_text. If of length 1 the entire task
bar shares the same color.
task_font(string): Font of task string
task_pos(tuple): position of task string
task_height(float): height for task string
task_text(string): text of the task bar
# Info
info_text(list[string]): Text list for information texts
info_color(list[string]): Color of the information text string
info_font(list[string]): Font of the information text string
info_pos(list[tuple]): Position of the information text string
info_height(list[float]): Height of the information text string
# Stimuli
stim_height(float): height of the stimuli object
stim_pos(tuple): position of stimuli
stim_font(string): font of the stimuli
stim_inquiry(list[string]): list of elements to flash
stim_colors(list[string]): list of colors for stimuli
stim_timing(list[float]): timing for each letter flash
"""
self.window = window
self.refresh_rate = window.getActualFrameRate()
self.logger = logging.getLogger(__name__)
self.stimuli_inquiry = stim_inquiry
self.stimuli_colors = stim_colors
self.stimuli_timing = stim_timing
self.is_txt_stim = is_txt_stim
self.staticPeriod = static_clock
self.static_time = static_time
self.experiment_clock = experiment_clock
self.timing_clock = core.Clock()
# Used to handle writing the marker stimulus
self.marker_writer = marker_writer or NullMarkerWriter()
# Length of the stimuli (number of flashes)
self.stim_length = len(stim_inquiry)
# Informational Parameters
self.info_text = info_text
# Stim parameters
self.stimuli_font = stim_font
self.stimuli_height = stim_height
self.stimuli_pos = stim_pos
# Trigger Items
self.first_run = True
self.trigger_type = trigger_type
self.trigger_callback = TriggerCallback()
# Callback used on presentation of first stimulus.
self.first_stim_callback = lambda _sti: None
self.size_list_sti = []
self.space_char = space_char
self.task = visual.TextStim(win=self.window, color=task_color[0],
height=task_height,
text=task_text,
font=task_font, pos=task_pos,
wrapWidth=None, colorSpace='rgb',
opacity=1, depth=-6.0)
# Create multiple text objects based on input
self.text = []
for idx in range(len(self.info_text)):
self.text.append(visual.TextStim(
win=self.window,
color=info_color[idx],
height=info_height[idx],
text=self.info_text[idx],
font=info_font[idx],
pos=info_pos[idx],
wrapWidth=None, colorSpace='rgb',
opacity=1, depth=-6.0))
# Create Stimuli Object
if self.is_txt_stim:
self.sti = visual.TextStim(
win=self.window,
color='white',
height=self.stimuli_height,
text='+',
font=self.stimuli_font,
pos=self.stimuli_pos,
wrapWidth=None, colorSpace='rgb',
opacity=1, depth=-6.0)
else:
self.sti = visual.ImageStim(
win=self.window,
image=None,
mask=None,
pos=self.stimuli_pos,
ori=0.0)
def draw_static(self):
"""Draw static elements in a stimulus."""
self.task.draw()
for idx in range(len(self.text)):
self.text[idx].draw()
def schedule_to(self, ele_list=[], time_list=[], color_list=[]):
"""Schedule stimuli elements (works as a buffer).
Args:
ele_list(list[string]): list of elements of stimuli
time_list(list[float]): list of timings of stimuli
color_list(list[string]): colors of elements of stimuli
"""
self.stimuli_inquiry = ele_list
self.stimuli_timing = time_list
self.stimuli_colors = color_list
def update_task(self, text: str, color_list: List[str], pos: Tuple[float]):
"""Update Task Object.
PARAMETERS:
-----------
text: text for task
color_list: list of the colors for each char
pos: position of task
"""
self.task.text = text
self.task.color = color_list[0]
self.task.pos = pos
def do_inquiry(self):
"""Do inquiry.
Animates an inquiry of flashing letters to achieve RSVP.
"""
# init an array for timing information
timing = []
if self.first_run:
# play an inquiry start sound to help orient triggers
first_stim_timing = _calibration_trigger(
self.experiment_clock,
trigger_type=self.trigger_type, display=self.window,
on_trigger=self.marker_writer.push_marker)
timing.append(first_stim_timing)
self.first_stim_time = first_stim_timing[-1]
self.first_run = False
# generate an inquiry (list of stimuli with meta information)
inquiry = self._generate_inquiry()
# do the inquiry
for idx in range(len(inquiry)):
self.is_first_stim = (idx == 0)
# set a static period to do all our stim setting.
# will warn if ISI value is violated.
self.staticPeriod.name = 'Stimulus Draw Period'
self.staticPeriod.start(self.stimuli_timing[idx])
# Reset the timing clock to start presenting
self.window.callOnFlip(
self.trigger_callback.callback,
self.experiment_clock,
inquiry[idx]['sti_label'])
self.window.callOnFlip(self.marker_writer.push_marker, inquiry[idx]['sti_label'])
if idx == 0 and callable(self.first_stim_callback):
self.first_stim_callback(inquiry[idx]['sti'])
# Draw stimulus for n frames
inquiry[idx]['sti'].draw()
self.draw_static()
self.window.flip()
core.wait((inquiry[idx]['time_to_present'] - 1) / self.refresh_rate)
# End static period
self.staticPeriod.complete()
# append timing information (identical handling for text and image stimuli)
timing.append(self.trigger_callback.timing)
self.trigger_callback.reset()
# draw in static and flip once more
self.draw_static()
self.window.flip()
return timing
def _generate_inquiry(self):
"""Generate inquiry.
Generate stimuli for next RSVP inquiry.
"""
stim_info = []
for idx in range(len(self.stimuli_inquiry)):
current_stim = {}
# turn ms timing into frames! Much more accurate!
current_stim['time_to_present'] = int(self.stimuli_timing[idx] * self.refresh_rate)
# check if stimulus needs to use a non-default size
if self.size_list_sti:
this_stimuli_size = self.size_list_sti[idx]
else:
this_stimuli_size = self.stimuli_height
# Set the Stimuli attrs
if self.stimuli_inquiry[idx].endswith('.png'):
current_stim['sti'] = self.create_stimulus(mode='image', height_int=this_stimuli_size)
current_stim['sti'].image = self.stimuli_inquiry[idx]
current_stim['sti'].size = resize_image(
current_stim['sti'].image, current_stim['sti'].win.size, this_stimuli_size)
current_stim['sti_label'] = path.splitext(
path.basename(self.stimuli_inquiry[idx]))[0]
else:
# text stimulus
current_stim['sti'] = self.create_stimulus(mode='text', height_int=this_stimuli_size)
txt = self.stimuli_inquiry[idx]
# customize presentation of space char.
current_stim['sti'].text = txt if txt != SPACE_CHAR else self.space_char
current_stim['sti'].color = self.stimuli_colors[idx]
current_stim['sti_label'] = txt
# test whether the word will be too big for the screen
text_width = current_stim['sti'].boundingBox[0]
if text_width > self.window.size[0]:
monitor_width, monitor_height = get_screen_resolution()
text_height = current_stim['sti'].boundingBox[1]
# If we are in full-screen, text size in Psychopy norm units
# is monitor width/monitor height
if self.window.size[0] == monitor_width:
new_text_width = monitor_width / monitor_height
else:
# If not, text width is calculated relative to both
# monitor size and window size
new_text_width = (
self.window.size[1] / monitor_height) * (
monitor_width / monitor_height)
new_text_height = (text_height * new_text_width) / text_width
current_stim['sti'].height = new_text_height
stim_info.append(current_stim)
return stim_info
def update_task_state(self, text: str, color_list: List[str]) -> None:
"""Update task state.
Removes letters or appends to the right.
Args:
text(string): new text for task state
color_list(list[string]): list of colors for each
"""
task_state_text = visual.TextStim(
win=self.window, font=self.task.font, text=text)
x_task_position = task_state_text.boundingBox[0] / \
self.window.size[0] - 1
task_pos = (x_task_position, 1 - self.task.height)
self.update_task(text=text, color_list=color_list, pos=task_pos)
def wait_screen(self, message, color):
"""Wait Screen.
Args:
message(string): message to be displayed while waiting
color(string): color of the wait message
"""
# Construct the wait message
wait_message = visual.TextStim(win=self.window, font=self.stimuli_font,
text=message,
height=.1,
color=color,
pos=(0, -.5),
wrapWidth=2,
colorSpace='rgb',
opacity=1, depth=-6.0)
# Try adding our BCI logo. Pass if not found.
try:
wait_logo = visual.ImageStim(
self.window,
image='bcipy/static/images/gui_images/bci_cas_logo.png',
pos=(0, .5),
mask=None,
ori=0.0)
wait_logo.size = resize_image(
'bcipy/static/images/gui_images/bci_cas_logo.png',
self.window.size, 1)
wait_logo.draw()
except Exception:
self.logger.debug('Cannot load logo image')
pass
# Draw and flip the screen.
wait_message.draw()
self.window.flip()
def create_stimulus(self, height_int: int, mode: str = 'text'):
"""Create Stimulus.
Returns a TextStim or ImageStim object.
Args:
height_int: The height of the stimulus
mode: "text" or "image", determines which to return
"""
if mode == 'text':
return visual.TextStim(
win=self.window,
color='white',
height=height_int,
text='+',
font=self.stimuli_font,
pos=self.stimuli_pos,
wrapWidth=None,
colorSpace='rgb',
opacity=1,
depth=-6.0)
if mode == 'image':
return visual.ImageStim(
win=self.window,
image=None,
mask=None,
units='',
pos=self.stimuli_pos,
size=(height_int, height_int),
ori=0.0)
|
11540
|
from __future__ import division
import itertools
import json
import math
import os
import random
import shutil
import subprocess
import sys
durationA = str(5)
durationB = str(4)
durationC = str(1)
def main():
if len(sys.argv) > 1:
nbDepth = int(sys.argv[1])
if nbDepth < 2:
nbDepth = 2
else:
nbDepth = 2
mainFolder = "depth"
if not os.path.exists(mainFolder):
subprocess.call(["mkdir", mainFolder])
generateDomain("depth", nbDepth)
#print "Every file has been written. Exiting"
def generateDomain(folderName, nbDepth):
domainFilename = folderName + "/" + folderName + "-flat" + str(nbDepth) + ".dom.anml"
printDomainToFile(domainFilename, nbDepth)
domainFilename = folderName + "/" + folderName + "-hier" + str(nbDepth) + ".dom.anml"
printDomainHierToFile(domainFilename, nbDepth)
def printDomainToFile(domainFilename, nbDepth):
with open(domainFilename, "w") as f:
for i in range(0, nbDepth):
f.write("predicate a" + str(i+1) +"();\n")
f.write("predicate b" + str(i+1) +"();\n")
f.write("predicate c" + str(i+1) +"();\n")
f.write("predicate d" + str(i+1) +"();\n")
f.write("predicate e" + str(i+1) +"();\n")
f.write("\naction An" + str(i+1) + " () {\n")
f.write("\tduration := " + durationA + ";\n")
if i > 0:
f.write("\t[start] {\n")
f.write("\t\tb"+ str(i) +" == true;\n")
f.write("\t\td"+ str(i) +" == true;\n")
f.write("\t\te"+ str(i) +" == true;\n")
f.write("\t};\n")
f.write("\t[start] a" + str(i+1) + " := true;\n")
f.write("\t[end] {\n")
f.write("\t\ta" + str(i+1) + " := false;\n")
f.write("\t\tb" + str(i+1) + " := true;\n")
f.write("\t\td" + str(i+1) + " := false;\n")
f.write("\t};\n")
f.write("};\n")
f.write("\naction Bn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationB + ";\n")
f.write("\t[start] a" + str(i+1) + " == true;\n")
f.write("\t[start] c" + str(i+1) + " := true;\n")
f.write("\t[end] {\n")
f.write("\t\tc" + str(i+1) + " := false;\n")
f.write("\t\td" + str(i+1) + " := true;\n")
f.write("\t};\n")
f.write("};\n")
f.write("\naction Cn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationC + ";\n")
f.write("\t[start] c" + str(i+1) + " == true;\n")
f.write("\t[end] {\n")
f.write("\t\tb" + str(i+1) + " := false;\n")
f.write("\t\te" + str(i+1) + " := true;\n")
f.write("\t};\n")
f.write("};\n")
######################## problem ###############
f.write("\n/*******Problem************/\n")
f.write("[all] contains{\n")
f.write("\tCn" + str(nbDepth) +"();\n")
f.write("};")
def printDomainHierToFile(domainFilename, nbDepth):
with open(domainFilename, "w") as f:
for i in range(0, nbDepth):
if i == 0:
f.write("\naction An" + str(i+1) + " () {\n")
f.write("\tmotivated;\n")
f.write("\tduration := " + durationA + ";\n")
f.write("};\n")
else:
f.write("\naction An" + str(i+1) + " () {\n")
f.write("\tmotivated;\n")
f.write("\tduration := " + durationA + ";\n")
f.write("\ta : ABC" + str(i) + "();\n")
f.write("\t end(a) < start;\n")
f.write("};\n")
f.write("\naction Bn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationB + ";\n")
f.write("\tmotivated;\n")
f.write("};\n")
f.write("\naction Cn" + str(i+1) + " () {\n")
f.write("\tduration := " + durationC + ";\n")
f.write("\tmotivated;\n")
f.write("};\n")
f.write("\naction ABC" + str(i+1) + " () {\n")
f.write("\t[all] contains {\n")
f.write("\t\t b" + str(i+1) + " : An" + str(i+1) + "();\n")
f.write("\t\t d" + str(i+1) + " : Bn" + str(i+1) + "();\n")
f.write("\t\t e" + str(i+1) + " : Cn" + str(i+1) + "();\n")
f.write("\t};\n")
f.write("\tstart(b" + str(i+1) + ") < start(d" + str(i+1) + ");\n")
f.write("\tend(d" + str(i+1) + ") < end(b" + str(i+1) + ");\n")
f.write("\tstart(d" + str(i+1) + ") < start(e" + str(i+1) + ");\n")
f.write("\tend(e" + str(i+1) + ") < end(d" + str(i+1) + ");\n")
f.write("};\n")
#################### problem #############
f.write("\n/*******Problem************/\n")
f.write("[all] contains{\n")
f.write("\tCn" + str(nbDepth) +"();\n")
f.write("};")
if __name__ == "__main__":
main()
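# Usage note (not part of the original script): running e.g. `python generate_depth.py 3`
# (script name illustrative) creates a "depth" directory if missing and writes
# depth/depth-flat3.dom.anml and depth/depth-hier3.dom.anml; with no argument nbDepth
# defaults to 2.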
|
11543
|
import decimal
from django import template
register = template.Library()
@register.simple_tag
def can_change_status(payment_request, user):
return payment_request.can_user_change_status(user)
@register.simple_tag
def can_delete(payment_request, user):
return payment_request.can_user_delete(user)
@register.simple_tag
def can_edit(payment_request, user):
return payment_request.can_user_edit(user)
@register.simple_tag
def percentage(value, total):
if not total:
return decimal.Decimal(0)
unrounded_total = (value / total) * 100
# round using Decimal since we're dealing with currency
rounded_total = unrounded_total.quantize(
decimal.Decimal('0.0'),
rounding=decimal.ROUND_DOWN,
)
return rounded_total
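# Hedged usage sketch (not part of the original tag library): percentage() rounds down to one
# decimal place using Decimal arithmetic, so callers should pass Decimal amounts. Values below
# are illustrative.
#
#   percentage(decimal.Decimal("30.00"), decimal.Decimal("70.00"))  # -> Decimal('42.8')
#   percentage(decimal.Decimal("30.00"), decimal.Decimal("0"))      # -> Decimal('0')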
|
11564
|
class _FuncStorage:
def __init__(self):
self._function_map = {}
def insert_function(self, name, function):
self._function_map[name] = function
def get_all_functions(self):
return self._function_map
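# Hedged usage sketch (not part of the original module): _FuncStorage is a minimal
# name-to-function registry; the decorator below shows one illustrative way to use it.
_storage = _FuncStorage()

def register(name):
    def decorator(func):
        _storage.insert_function(name, func)
        return func
    return decorator

@register("greet")
def _greet():
    return "hello"

# _storage.get_all_functions() -> {"greet": _greet}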
|
11587
|
from typing import List, Type
from apiron.service.base import ServiceBase
class DiscoverableService(ServiceBase):
"""
A Service whose hosts are determined via a host resolver.
A host resolver is any class with a :func:`resolve` method
that takes a service name as its sole argument
and returns a list of host names that correspond to that service.
"""
host_resolver_class: Type
service_name: str
@classmethod
def get_hosts(cls) -> List[str]:
return cls.host_resolver_class.resolve(cls.service_name)
def __str__(self) -> str:
return self.service_name
def __repr__(self) -> str:
klass = self.__class__
return "{klass}(service_name={service_name}, host_resolver={host_resolver})".format(
klass=klass.__name__, service_name=klass.service_name, host_resolver=klass.host_resolver_class.__name__
)
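# Hedged usage sketch (not part of the original module): a minimal host resolver and service
# subclass, assuming only the resolve(service_name) contract described in the docstring above.
# Class and host names are illustrative.
class _StaticResolver:
    @staticmethod
    def resolve(service_name):
        return ["svc-a.internal", "svc-b.internal"]

class _DemoService(DiscoverableService):
    service_name = "demo-service"
    host_resolver_class = _StaticResolver

# _DemoService.get_hosts() -> ["svc-a.internal", "svc-b.internal"]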
|
11594
|
import pytest
from apostello import models
@pytest.mark.slow
@pytest.mark.django_db
class TestContactForm:
"""Test the sending of SMS."""
def test_number_permissions_staff_exception(self, recipients, users):
"""Test sending a message now."""
calvin = recipients["calvin"]
# check good post:
prof = users["staff"].profile
prof.can_see_contact_nums = False
prof.save()
r = users["c_staff"].post(
f"/api/v2/recipients/{calvin.pk}/",
{
"pk": calvin.pk,
"first_name": calvin.first_name,
"last_name": calvin.last_name,
"number": "+447900000000",
"do_not_reply": calvin.do_not_reply,
},
)
assert r.status_code == 200
calvin.refresh_from_db()
assert calvin.number == "+447900000000"
def test_number_permissions_no_perm(self, recipients, users):
calvin = recipients["calvin"]
r = users["c_in"].post(
f"/api/v2/recipients/{calvin.pk}/",
{
"pk": calvin.pk,
"first_name": calvin.first_name,
"last_name": calvin.last_name,
"number": "+447900000000",
"do_not_reply": calvin.do_not_reply,
},
)
assert r.status_code == 400
assert "You do not have permission to change the number field." in r.json()["errors"]["__all__"]
def test_number_permissions_with_perm(self, recipients, users):
calvin = recipients["calvin"]
# check good post:
prof = users["notstaff2"].profile
prof.can_see_contact_nums = True
prof.save()
r = users["c_in"].post(
f"/api/v2/recipients/{calvin.pk}/",
{
"pk": calvin.pk,
"first_name": calvin.first_name,
"last_name": calvin.last_name,
"number": "+447900000001",
"do_not_reply": calvin.do_not_reply,
},
)
assert r.status_code == 200
calvin.refresh_from_db()
assert calvin.number == "+447900000001"
def test_notes_permissions_staff_exception(self, recipients, users):
"""Test sending a message now."""
calvin = recipients["calvin"]
# check good post:
prof = users["staff"].profile
prof.can_see_contact_notes = False
prof.save()
r = users["c_staff"].post(
f"/api/v2/recipients/{calvin.pk}/",
{
"pk": calvin.pk,
"first_name": calvin.first_name,
"last_name": calvin.last_name,
"number": calvin.number,
"do_not_reply": calvin.do_not_reply,
"notes": "hi there",
},
)
assert r.status_code == 200
calvin.refresh_from_db()
assert calvin.notes == "hi there"
def test_notes_permissions_no_perm(self, recipients, users):
calvin = recipients["calvin"]
r = users["c_in"].post(
f"/api/v2/recipients/{calvin.pk}/",
{
"pk": calvin.pk,
"first_name": calvin.first_name,
"last_name": calvin.last_name,
"do_not_reply": calvin.do_not_reply,
"notes": "hi there",
},
)
assert r.status_code == 400
assert "You do not have permission to change the notes field." in r.json()["errors"]["__all__"]
calvin.refresh_from_db()
assert not (calvin.notes == "hi there")
def test_notes_permissions_with_perm(self, recipients, users):
calvin = recipients["calvin"]
# check good post:
prof = users["notstaff2"].profile
prof.can_see_contact_notes = True
prof.save()
r = users["c_in"].post(
f"/api/v2/recipients/{calvin.pk}/",
{
"pk": calvin.pk,
"first_name": calvin.first_name,
"last_name": calvin.last_name,
"do_not_reply": calvin.do_not_reply,
"notes": "something something",
},
)
assert r.status_code == 200
calvin.refresh_from_db()
assert calvin.notes == "something something"
|
11636
|
import re
import types
from functools import partial
LITERAL_TYPE = types.StringTypes + (int, float, long, bool, )
class Spec(object):
"""
This object, when overridden with an object that implements a file format
specification, will perform validation on a given parsed version of the
format input.
SPEC Node Documentation:
========================
expected_type:
A type object whose type the object should match.
required_nodes:
A list of nodes that are required for the current node.
required_nodes_when:
A dict of node name/lambda pairs. If the lambda evaluates to True, a
node whose name corresponds to the node name is required.
The current node is passed as a parameter to the lambda as the only
argument.
disallowed_nodes:
A list of nodes that explicitly are disallowed in the current node.
allowed_once_nodes:
A list of nodes that are allowed only once.
allowed_nodes:
A list of nodes that are allowed multiple times.
unknown_node_level:
The message type to return when an unknown node is encountered.
child_nodes:
A dict of node definitions for nodes that can exist within this node.
max_length:
For sequence values only. An integer describing the maximum length of
the string.
not_empty:
A boolean value describing whether the string/list/dict can be empty.
values:
A list of possible values for the node. Only applies to lists and
literal nodes.
value_matches:
If `values` is not set, the value must match this regex. Only applies
to string nodes.
process:
A lambda function that returns a function to process the node. The
lambda accepts one parameter (self) and should return a function that
accepts two parameters (self, node).
child_process:
A lambda function (similar to `process`) that returns a function to
process a child node. The lambda accepts one parameter (self) and
should return a function that accepts three parameters (self, node_name,
node).
If this is set, no further testing will take place on child nodes.
"""
SPEC_NAME = "Specification"
MORE_INFO = "You can find more info online."
SPEC = None
def __init__(self, data, err):
self.data = self.parse(data)
self.err = err
self.error = partial(self._err_message, self.err.error)
self.warning = partial(self._err_message, self.err.warning)
self.notice = partial(self._err_message, self.err.notice)
self.err_map = {"error": self.error,
"warning": self.warning,
"notice": self.notice}
self.path = []
def _err_message(self, func, *args, **kwargs):
if self.path:
nodepath = "Node: %s" % self._get_path()
if isinstance(kwargs["description"], list):
kwargs["description"].append(nodepath)
else:
kwargs["description"] = [
kwargs["description"], nodepath]
func(*args, **kwargs)
def _message(self, type_, *args, **kwargs):
kwargs[type_] = kwargs.pop("message")
self.err_map[type_](*args, **kwargs)
def validate(self):
# Validate the root node.
root_name, root_node = self.get_root_node(self.data)
root_val_result = self.validate_root_node(root_node)
        if root_val_result is False:
return
# Iterate the tree and validate as we go.
self.iterate(root_name, root_node, self.SPEC)
def parse(self, data): pass
def validate_root_node(self, node): pass
def get_root_node(self, data):
"""
We expect this function to return a tuple:
("Root Node Name", root_node)
"""
def has_attribute(self, node, key): pass
def get_attribute(self, node, key): pass
def has_child(self, node, child_name): pass
def get_children(self, node):
"""
This function should return a list of (child_name, child)-form tuples.
"""
def iterate(self, branch_name, branch, spec_branch):
self.path.append(branch_name)
self._iterate(branch_name, branch, spec_branch)
self.path.pop()
def _get_path(self):
return ' > '.join(self.path)
def _iterate(self, branch_name, branch, spec_branch):
"""Iterate the tree of nodes and validate as we go."""
# Check that the node is of the proper type. If it isn't, then we need
# to stop iterating at this point.
exp_type = spec_branch.get("expected_type")
if (exp_type and
not isinstance(branch, exp_type) or
# Handle `isinstance(True, int) == True` :(
(isinstance(branch, bool) and
(exp_type == int if isinstance(exp_type, type) else
bool not in exp_type))):
self.error(
err_id=("spec", "iterate", "bad_type"),
error="%s's `%s` was of an unexpected type." %
(self.SPEC_NAME, branch_name),
description=["While validating a %s, a `%s` was encountered "
"which is of an improper type." %
(self.SPEC_NAME, branch_name),
"Found: %s" % repr(branch),
self.MORE_INFO])
return
# Handle any generic processing.
if "process" in spec_branch:
# Let the spec processor resolve the processor and then run the
# processor.
spec_branch["process"](self)(branch)
if "not_empty" in spec_branch and not branch:
self.error(
err_id=("spec", "iterate", "empty"),
error="`%s` is empty." % branch_name,
description=["A value was expected for `%s`, but one wasn't "
"found." % branch_name,
self.MORE_INFO])
# If the node isn't an object...
if not isinstance(branch, dict):
if "values" in spec_branch and branch not in spec_branch["values"]:
self.error(
err_id=("spec", "iterate", "bad_value"),
error="`%s` contains an invalid value in %s" %
(branch_name, self.SPEC_NAME),
description=["A `%s` was encountered while validating a "
"`%s` containing the value '%s'. This value "
"is not appropriate for this type of "
"element." %
(branch_name, self.SPEC_NAME, branch),
self.MORE_INFO])
elif ("value_matches" in spec_branch and
isinstance(branch, types.StringTypes)):
raw_pattern = spec_branch["value_matches"]
if not re.match(raw_pattern, branch):
self.error(
err_id=("spec", "iterate", "value_pattern_fail"),
error="`%s` contains an invalid value in %s" %
(branch_name, self.SPEC_NAME),
description=["A `%s` was encountered while validating "
"a `%s`. Its value does not match the "
"pattern required for `%s`s." %
(branch_name, self.SPEC_NAME,
branch_name),
"Found value: %s" % branch,
"Pattern: %s" % raw_pattern,
self.MORE_INFO])
if ("max_length" in spec_branch and
len(branch) > spec_branch["max_length"]):
self.error(
err_id=("spec", "iterate", "max_length"),
error="`%s` has exceeded its maximum length." % branch_name,
description=["`%s` has a maximum length (%d), which has "
"been exceeded (%d)." %
(branch_name, spec_branch["max_length"],
len(branch)),
self.MORE_INFO])
        # The rest of the tests are for child items.
        if not isinstance(branch, (list, tuple, dict)):
            return
        if isinstance(branch, (list, tuple)):
            if "child_nodes" in spec_branch:
                for child in branch:
                    self.iterate(branch_name + " descendant", child,
                                 spec_branch["child_nodes"])
            # We've got nothing else to do with lists.
            return
# If we need to process the child nodes individually, do that now.
if "child_process" in spec_branch:
processor = spec_branch["child_process"](self)
for child_name, child in self.get_children(branch):
processor(child_name, child)
# If there's nothing else to do, don't go down that path.
if ("required_nodes" not in spec_branch and
"required_nodes_when" not in spec_branch and
"disallowed_nodes" not in spec_branch):
return
considered_nodes = set()
        # Check that all required nodes are present.
if "required_nodes" in spec_branch:
considered_nodes.update(spec_branch["required_nodes"])
for req_node in [n for n in spec_branch["required_nodes"] if
not self.has_child(branch, n)]:
self.error(
err_id=("spec", "iterate", "missing_req"),
error="%s expecting `%s`" % (self.SPEC_NAME, req_node),
description=["The '%s' node of the %s expects a `%s` "
"element, which was not found." %
(branch_name, self.SPEC_NAME, req_node),
self.MORE_INFO])
# Check that conditionally required nodes are present.
if "required_nodes_when" in spec_branch:
considered_nodes.update(spec_branch["required_nodes_when"].keys())
for req_node in [name for name, cond in
spec_branch["required_nodes_when"].items() if
cond(branch) and not self.has_child(branch, name)]:
self.error(
err_id=("spec", "iterate", "missing_req_cond"),
error="%s expecting `%s`" % (self.SPEC_NAME, req_node),
description=["The '%s' node, under the current "
"circumstances, is missing a `%s` element. "
"This is a required condition of a %s." %
(branch_name, req_node, self.SPEC_NAME),
self.MORE_INFO])
# Check that there are no disallowed nodes.
if "disallowed_nodes" in spec_branch:
disallowed_nodes = spec_branch["disallowed_nodes"]
considered_nodes.update(disallowed_nodes)
for dnode in [n for n in disallowed_nodes if
self.has_child(branch, n)]:
self.error(
err_id=("spec", "iterate", "disallowed"),
error="%s found `%s`, which is not allowed." %
(self.SPEC_NAME, dnode),
description=["The '%s' node contains `%s`, which is a "
"disallowed element. It should be removed." %
(branch_name, dnode),
self.MORE_INFO])
if ("allowed_nodes" not in spec_branch and
"allowed_once_nodes" not in spec_branch):
return
# Check that allowed nodes are obeyed.
allowed_nodes = set(spec_branch.setdefault("allowed_nodes", []))
allowed_once_nodes = spec_branch.setdefault("allowed_once_nodes", [])
allowed_nodes.update(allowed_once_nodes)
child_node_specs = spec_branch.setdefault("child_nodes", {})
seen_nodes = set()
warned_nodes = set()
for child_name, child in self.get_children(branch):
cspec_branch = None
# Process the node first.
if child_name in child_node_specs:
cspec_branch = child_node_specs[child_name]
elif "*" in child_node_specs:
cspec_branch = child_node_specs["*"]
if cspec_branch is not None:
# If it's a lazily evaluated branch, evaluate it now.
if isinstance(cspec_branch, types.LambdaType):
cspec_branch = cspec_branch(self)
# Iterate the node.
self.iterate(child_name, child, cspec_branch)
# If we've seen a node before that's only supposed to be seen a
# single time, warn about it.
if child_name in allowed_once_nodes and child_name in seen_nodes:
# Don't warn about the same node multiple times.
if child_name in warned_nodes:
continue
self.error(
err_id=("spec", "iterate", "allow_once_multiple"),
error="%s found `%s` more than once." %
(self.SPEC_NAME, child_name),
description=["%ss may only contain a single `%s` element, "
"however, it was encountered multiple times." %
(self.SPEC_NAME, child_name),
self.MORE_INFO])
continue
# Remember that we've seen this node.
seen_nodes.add(child_name)
if child_name in considered_nodes:
continue
# If the child isn't allowed, throw an error.
if child_name not in allowed_nodes and "*" not in allowed_nodes:
self._message(
spec_branch.get("unknown_node_level", "warning"),
err_id=("spec", "iterate", "not_allowed"),
message="`%s` is not a recognized element within a %s" %
(child_name, self.SPEC_NAME),
description=["While iterating a %s, a `%s` was found "
"within a %s, which is not valid." %
(self.SPEC_NAME, child_name, branch_name),
self.MORE_INFO])
|
11639
|
import json
import logging
from http import HTTPStatus
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import werkzeug
from flask import Blueprint, Flask, Response, abort, jsonify
from flask.views import MethodView
from flask_cors import CORS
from gevent.pywsgi import WSGIServer
from geventwebsocket import Resource as WebsocketResource, WebSocketServer
from marshmallow import Schema
from marshmallow.exceptions import ValidationError
from webargs.flaskparser import parser
from werkzeug.exceptions import NotFound
from rotkehlchen.api.rest import RestAPI, api_response, wrap_in_fail_result
from rotkehlchen.api.v1.parser import ignore_kwarg_parser, resource_parser
from rotkehlchen.api.v1.resources import (
AaveBalancesResource,
AaveHistoryResource,
AccountingReportDataResource,
AccountingReportsResource,
AdexBalancesResource,
AdexHistoryResource,
AllAssetsResource,
AllBalancesResource,
AssetIconsResource,
AssetMovementsResource,
AssetsReplaceResource,
AssetsTypesResource,
AssetUpdatesResource,
AssociatedLocations,
AsyncTasksResource,
AvalancheTransactionsResource,
BalancerBalancesResource,
BalancerEventsHistoryResource,
BalancerTradesHistoryResource,
BinanceAvailableMarkets,
BinanceUserMarkets,
BlockchainBalancesResource,
BlockchainsAccountsResource,
BTCXpubResource,
CompoundBalancesResource,
CompoundHistoryResource,
CounterpartiesResource,
CurrentAssetsPriceResource,
DatabaseBackupsResource,
DatabaseInfoResource,
DataImportResource,
DBSnapshotDeletingResource,
DBSnapshotDownloadingResource,
DBSnapshotExportingResource,
DBSnapshotImportingResource,
DefiBalancesResource,
ERC20TokenInfo,
ERC20TokenInfoAVAX,
Eth2DailyStatsResource,
Eth2StakeDepositsResource,
Eth2StakeDetailsResource,
Eth2ValidatorsResource,
EthereumAirdropsResource,
EthereumAssetsResource,
EthereumModuleDataResource,
EthereumModuleResource,
EthereumTransactionsResource,
ExchangeBalancesResource,
ExchangeRatesResource,
ExchangesDataResource,
ExchangesResource,
ExternalServicesResource,
HistoricalAssetsPriceResource,
HistoryActionableItemsResource,
HistoryBaseEntryResource,
HistoryDownloadingResource,
HistoryExportingResource,
HistoryProcessingResource,
HistoryStatusResource,
IgnoredActionsResource,
IgnoredAssetsResource,
InfoResource,
LedgerActionsResource,
LiquityStakingHistoryResource,
LiquityStakingResource,
LiquityTrovesHistoryResource,
LiquityTrovesResource,
LoopringBalancesResource,
MakerdaoDSRBalanceResource,
MakerdaoDSRHistoryResource,
MakerdaoVaultDetailsResource,
MakerdaoVaultsResource,
ManuallyTrackedBalancesResource,
MessagesResource,
NamedEthereumModuleDataResource,
NamedOracleCacheResource,
NFTSBalanceResource,
NFTSResource,
OraclesResource,
OwnedAssetsResource,
PeriodicDataResource,
PickleDillResource,
PingResource,
QueriedAddressesResource,
ReverseEnsResource,
SettingsResource,
StakingResource,
StatisticsAssetBalanceResource,
StatisticsNetvalueResource,
StatisticsRendererResource,
StatisticsValueDistributionResource,
SushiswapBalancesResource,
SushiswapEventsHistoryResource,
SushiswapTradesHistoryResource,
TagsResource,
TradesResource,
UniswapBalancesResource,
UniswapEventsHistoryResource,
UniswapTradesHistoryResource,
UserAssetsResource,
UserPasswordChangeResource,
UserPremiumKeyResource,
UserPremiumSyncResource,
UsersByNameResource,
UsersResource,
WatchersResource,
YearnVaultsBalancesResource,
YearnVaultsHistoryResource,
YearnVaultsV2BalancesResource,
YearnVaultsV2HistoryResource,
create_blueprint,
)
from rotkehlchen.api.websockets.notifier import RotkiNotifier, RotkiWSApp
from rotkehlchen.logging import RotkehlchenLogsAdapter
URLS = List[
Union[
Tuple[str, Type[MethodView]],
Tuple[str, Type[MethodView], str],
]
]
URLS_V1: URLS = [
('/users', UsersResource),
('/watchers', WatchersResource),
('/users/<string:name>', UsersByNameResource),
('/users/<string:name>/password', UserPasswordChangeResource),
('/premium', UserPremiumKeyResource),
('/premium/sync', UserPremiumSyncResource),
('/settings', SettingsResource),
('/tasks/', AsyncTasksResource),
('/tasks/<int:task_id>', AsyncTasksResource, 'specific_async_tasks_resource'),
('/exchange_rates', ExchangeRatesResource),
('/external_services/', ExternalServicesResource),
('/oracles', OraclesResource),
('/oracles/<string:oracle>/cache', NamedOracleCacheResource),
('/exchanges', ExchangesResource),
('/exchanges/balances', ExchangeBalancesResource),
(
'/exchanges/balances/<string:location>',
ExchangeBalancesResource,
'named_exchanges_balances_resource',
),
('/assets/<string:asset>/icon', AssetIconsResource),
('/trades', TradesResource),
('/ledgeractions', LedgerActionsResource),
('/asset_movements', AssetMovementsResource),
('/tags', TagsResource),
('/exchanges/binance/pairs', BinanceAvailableMarkets),
('/exchanges/binance/pairs/<string:name>', BinanceUserMarkets),
('/exchanges/data/', ExchangesDataResource),
('/exchanges/data/<string:location>', ExchangesDataResource, 'named_exchanges_data_resource'),
('/balances/blockchains', BlockchainBalancesResource),
(
'/balances/blockchains/<string:blockchain>',
BlockchainBalancesResource,
'named_blockchain_balances_resource',
),
('/balances/', AllBalancesResource),
('/balances/manual', ManuallyTrackedBalancesResource),
('/statistics/netvalue', StatisticsNetvalueResource),
('/statistics/balance/<string:asset>', StatisticsAssetBalanceResource),
('/statistics/value_distribution', StatisticsValueDistributionResource),
('/statistics/renderer', StatisticsRendererResource),
('/messages/', MessagesResource),
('/periodic/', PeriodicDataResource),
('/history/', HistoryProcessingResource),
('/history/status', HistoryStatusResource),
('/history/export/', HistoryExportingResource),
('/history/download/', HistoryDownloadingResource),
('/history/events', HistoryBaseEntryResource),
('/history/actionable_items', HistoryActionableItemsResource),
('/reports/', AccountingReportsResource),
(
'/reports/<int:report_id>',
AccountingReportsResource,
'per_report_resource',
),
(
'/reports/<int:report_id>/data',
AccountingReportDataResource,
'per_report_data_resource',
),
('/queried_addresses', QueriedAddressesResource),
('/blockchains/ETH/transactions', EthereumTransactionsResource),
(
'/blockchains/ETH/transactions/<string:address>',
EthereumTransactionsResource,
'per_address_ethereum_transactions_resource',
),
('/blockchains/ETH2/validators', Eth2ValidatorsResource),
('/blockchains/ETH2/stake/deposits', Eth2StakeDepositsResource),
('/blockchains/ETH2/stake/details', Eth2StakeDetailsResource),
('/blockchains/ETH2/stake/dailystats', Eth2DailyStatsResource),
('/blockchains/ETH/defi', DefiBalancesResource),
('/blockchains/ETH/airdrops', EthereumAirdropsResource),
('/blockchains/ETH/erc20details/', ERC20TokenInfo),
('/blockchains/ETH/modules/<string:module_name>/data', NamedEthereumModuleDataResource),
('/blockchains/ETH/modules/data', EthereumModuleDataResource),
('/blockchains/ETH/modules/data/counterparties', CounterpartiesResource),
('/blockchains/ETH/modules/', EthereumModuleResource),
('/blockchains/ETH/modules/makerdao/dsrbalance', MakerdaoDSRBalanceResource),
('/blockchains/ETH/modules/makerdao/dsrhistory', MakerdaoDSRHistoryResource),
('/blockchains/ETH/modules/makerdao/vaults', MakerdaoVaultsResource),
('/blockchains/ETH/modules/makerdao/vaultdetails', MakerdaoVaultDetailsResource),
('/blockchains/ETH/modules/aave/balances', AaveBalancesResource),
('/blockchains/ETH/modules/aave/history', AaveHistoryResource),
('/blockchains/ETH/modules/adex/balances', AdexBalancesResource),
('/blockchains/ETH/modules/adex/history', AdexHistoryResource),
('/blockchains/ETH/modules/balancer/balances', BalancerBalancesResource),
('/blockchains/ETH/modules/balancer/history/trades', BalancerTradesHistoryResource),
('/blockchains/ETH/modules/balancer/history/events', BalancerEventsHistoryResource),
('/blockchains/ETH/modules/compound/balances', CompoundBalancesResource),
('/blockchains/ETH/modules/compound/history', CompoundHistoryResource),
('/blockchains/ETH/modules/uniswap/balances', UniswapBalancesResource),
('/blockchains/ETH/modules/uniswap/history/events', UniswapEventsHistoryResource),
('/blockchains/ETH/modules/uniswap/history/trades', UniswapTradesHistoryResource),
('/blockchains/ETH/modules/sushiswap/balances', SushiswapBalancesResource),
('/blockchains/ETH/modules/sushiswap/history/events', SushiswapEventsHistoryResource),
('/blockchains/ETH/modules/sushiswap/history/trades', SushiswapTradesHistoryResource),
('/blockchains/ETH/modules/yearn/vaults/balances', YearnVaultsBalancesResource),
('/blockchains/ETH/modules/yearn/vaults/history', YearnVaultsHistoryResource),
('/blockchains/ETH/modules/yearn/vaultsv2/balances', YearnVaultsV2BalancesResource),
('/blockchains/ETH/modules/yearn/vaultsv2/history', YearnVaultsV2HistoryResource),
('/blockchains/ETH/modules/liquity/balances', LiquityTrovesResource),
('/blockchains/ETH/modules/liquity/events/trove', LiquityTrovesHistoryResource),
('/blockchains/ETH/modules/liquity/events/staking', LiquityStakingHistoryResource),
('/blockchains/ETH/modules/liquity/staking', LiquityStakingResource),
('/blockchains/ETH/modules/pickle/dill', PickleDillResource),
('/blockchains/ETH/modules/loopring/balances', LoopringBalancesResource),
('/blockchains/<string:blockchain>', BlockchainsAccountsResource),
('/blockchains/BTC/xpub', BTCXpubResource),
('/blockchains/AVAX/transactions', AvalancheTransactionsResource),
(
'/blockchains/AVAX/transactions/<string:address>',
AvalancheTransactionsResource,
'per_address_avalanche_transactions_resource',
),
('/blockchains/AVAX/erc20details/', ERC20TokenInfoAVAX),
('/assets', OwnedAssetsResource),
('/assets/types', AssetsTypesResource),
('/assets/replace', AssetsReplaceResource),
('/assets/all', AllAssetsResource),
('/assets/ethereum', EthereumAssetsResource),
('/assets/prices/current', CurrentAssetsPriceResource),
('/assets/prices/historical', HistoricalAssetsPriceResource),
('/assets/ignored', IgnoredAssetsResource),
('/assets/updates', AssetUpdatesResource),
('/assets/user', UserAssetsResource),
('/actions/ignored', IgnoredActionsResource),
('/info', InfoResource),
('/ping', PingResource),
('/import', DataImportResource),
('/nfts', NFTSResource),
('/nfts/balances', NFTSBalanceResource),
('/database/info', DatabaseInfoResource),
('/database/backups', DatabaseBackupsResource),
('/locations/associated', AssociatedLocations),
('/staking/kraken', StakingResource),
('/snapshot/download', DBSnapshotDownloadingResource),
('/snapshot/export', DBSnapshotExportingResource),
('/snapshot/import', DBSnapshotImportingResource),
('/snapshot/delete', DBSnapshotDeletingResource),
('/ens/reverse', ReverseEnsResource),
]
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def setup_urls(
rest_api: RestAPI,
blueprint: Blueprint,
urls: URLS,
) -> None:
for url_tuple in urls:
if len(url_tuple) == 2:
route, resource_cls = url_tuple # type: ignore
endpoint = resource_cls.__name__.lower()
elif len(url_tuple) == 3:
route, resource_cls, endpoint = url_tuple # type: ignore
else:
raise ValueError(f"Invalid URL format: {url_tuple!r}")
blueprint.add_url_rule(
route,
view_func=resource_cls.as_view(endpoint, rest_api_object=rest_api),
)
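# Editorial note (illustration, not from the original source): setup_urls
# accepts two tuple shapes. Without an explicit endpoint name, the Flask
# endpoint defaults to the lowercased resource class name, e.g.
#     ('/ping', PingResource)                    -> endpoint "pingresource"
#     ('/tasks/<int:task_id>', AsyncTasksResource,
#      'specific_async_tasks_resource')          -> endpoint "specific_async_tasks_resource"
# The explicit form is needed when the same resource class is registered under
# more than one route, as with AsyncTasksResource in URLS_V1 above.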
def endpoint_not_found(e: NotFound) -> Response:
msg = 'invalid endpoint'
# The isinstance check is because I am not sure if `e` is always going to
# be a "NotFound" error here
if isinstance(e, NotFound):
msg = e.description
return api_response(wrap_in_fail_result(msg), HTTPStatus.NOT_FOUND)
@parser.error_handler # type: ignore
@resource_parser.error_handler
@ignore_kwarg_parser.error_handler
def handle_request_parsing_error(
err: ValidationError,
_request: werkzeug.local.LocalProxy,
_schema: Schema,
error_status_code: Optional[int], # pylint: disable=unused-argument
error_headers: Optional[Dict], # pylint: disable=unused-argument
) -> None:
""" This handles request parsing errors generated for example by schema
field validation failing."""
msg = str(err)
if isinstance(err.messages, dict):
# first key is just the location. Ignore
key = list(err.messages.keys())[0]
msg = json.dumps(err.messages[key])
elif isinstance(err.messages, list):
msg = ','.join(err.messages)
err_response = jsonify(result=None, message=msg)
err_response.status_code = HTTPStatus.BAD_REQUEST
abort(err_response)
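# Editorial note (assumption about typical webargs/marshmallow behaviour, not
# taken from this file): err.messages usually has the shape
#     {"json": {"some_field": ["error message", ...]}}
# so the handler above drops the outer location key ("json") and serializes the
# remaining field-to-messages mapping into the 400 response body.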
class APIServer():
_api_prefix = '/api/1'
def __init__(
self,
rest_api: RestAPI,
ws_notifier: RotkiNotifier,
            cors_domain_list: Optional[List[str]] = None,
) -> None:
flask_app = Flask(__name__)
if cors_domain_list:
CORS(flask_app, origins=cors_domain_list)
blueprint = create_blueprint(self._api_prefix)
setup_urls(
blueprint=blueprint,
rest_api=rest_api,
urls=URLS_V1,
)
self.rest_api = rest_api
self.rotki_notifier = ws_notifier
self.flask_app = flask_app
self.blueprint = blueprint
self.wsgiserver: Optional[WSGIServer] = None
self.flask_app.register_blueprint(self.blueprint)
self.ws_server: Optional[WebSocketServer] = None
self.flask_app.errorhandler(HTTPStatus.NOT_FOUND)(endpoint_not_found)
self.flask_app.register_error_handler(Exception, self.unhandled_exception)
@staticmethod
def unhandled_exception(exception: Exception) -> Response:
""" Flask.errorhandler when an exception wasn't correctly handled """
log.critical(
'Unhandled exception when processing endpoint request',
exc_info=True,
exception=str(exception),
)
return api_response(wrap_in_fail_result(str(exception)), HTTPStatus.INTERNAL_SERVER_ERROR)
def run(self, host: str = '127.0.0.1', port: int = 5042, **kwargs: Any) -> None:
"""This is only used for the data faker and not used in production"""
self.flask_app.run(host=host, port=port, **kwargs)
def start(
self,
host: str = '127.0.0.1',
rest_port: int = 5042,
websockets_port: int = 5043,
) -> None:
"""This is used to start the API server in production"""
wsgi_logger = logging.getLogger(__name__ + '.pywsgi')
self.wsgiserver = WSGIServer(
listener=(host, rest_port),
application=self.flask_app,
log=wsgi_logger,
error_log=wsgi_logger,
)
msg = f'rotki REST API server is running at: {host}:{rest_port}'
print(msg)
log.info(msg)
self.wsgiserver.start()
self.ws_server = WebSocketServer(
listener=(host, websockets_port),
application=WebsocketResource([
('^/', RotkiWSApp),
]),
debug=False,
environ={'rotki_notifier': self.rotki_notifier},
)
msg = f'rotki Websockets API server is running at: {host}:{websockets_port}'
print(msg)
log.info(msg)
self.ws_server.start()
def stop(self, timeout: int = 5) -> None:
"""Stops the API server. If handlers are running after timeout they are killed"""
if self.wsgiserver is not None:
self.wsgiserver.stop(timeout)
self.wsgiserver = None
if self.ws_server is not None:
self.ws_server.stop(timeout)
            self.ws_server = None
self.rest_api.stop()
|
11644
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, AbstractUser
from django.utils import timezone
from django.utils.translation import gettext as _
from django import forms
from django.contrib.auth.hashers import make_password
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
from datetime import datetime, timedelta
class CarTrip(models.Model):
class Meta:
verbose_name = _('carTrip')
verbose_name_plural = _('cartrips')
def __str__(self):
return f'{self.driver_name} Car Trip'
driver_name = models.CharField(max_length=200)
destination = models.CharField(max_length=200)
number_of_seats = models.IntegerField('number of seats')
trip_date = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
@classmethod
    def create(cls, driver_name, destination, number_of_seats, trip_date):
        trip = cls(driver_name=driver_name,
                   destination=destination,
                   number_of_seats=number_of_seats,
                   trip_date=trip_date,
                   pub_date=timezone.now())
        return trip
def was_published_recently(self):
now = timezone.now()
        return now - timedelta(days=1) <= self.pub_date <= now
class Relation(models.Model):
class Meta:
verbose_name = _('relation')
verbose_name_plural = _('relation')
trip_number = models.IntegerField('trip_number')
hiker_name = models.CharField(max_length=200)
    def __str__(self):
return f'{self.hiker_name} going on trip id = {self.trip_number}'
@classmethod
def create(cls , trip_number, hiker_name):
rel = cls(trip_number=trip_number,
hiker_name=hiker_name,
)
return rel
|
11671
|
import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_qdebug":
out = self.get_qdebug(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
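    # Illustrative usage of the queue protocol (assumed caller, not part of
    # this class): the training loop sends (action, p_index, data) tuples and
    # reads results back, e.g.
    #     thread.input_queue.put(("get_action", agent_idx, action_inputs))
    #     act, h, c, mem, attn = thread.output_queue.get()
    # Shutdown is requested with ("None", None, None), which is exactly what
    # close_gputhreads() at the bottom of this file sends.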
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
# Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
action, p_index, data = self.input_queue.get()
if action is "None": # If you send `None`, the thread will exit.
return
elif action is "get_action":
out = self.get_action(data, p_index)
self.output_queue.put(out)
elif action is "get_loss":
out = self.get_loss(data, p_index)
self.output_queue.put(out)
elif action is "write_tboard":
self.write_tboard(data)
elif action is "add_to_buffer":
self.buffer_op.collect_exp(data)
elif action is "add_to_buffer_reinforce":
self.buffer_op.collect_exp(data)
elif action is "save_rew_info":
self.save_rew_info(data)
elif action is "save_benchmark":
out = self.save_benchmark(data)
self.output_queue.put(out)
elif action is "reset_rew_info":
self.reset_rew_info()
elif action is "save_model_rew":
if not (self.args.benchmark or self.args.display):
self.save_model(data)
self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2)
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, is_train)
act_j_t, act_soft_j_t, state_j_t1, mem_j_t1, attn_j_t, value_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, act_soft_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t, value_j_t
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
train_step, buffer_data = data
agent = self.trainers[p_index]
loss = agent.update(self.trainers, buffer_data, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, terminal = data
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[-1][i].append(info_n[0]['n'])
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
self.agent_info[j].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(self.ep_success, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
episode_b_success = []
for j in range(self.num_env):
episode_b_success.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_success = np.mean(np.array(episode_b_success)) / self.args.max_episode_len
print("steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode success: {}, time: {}".format(
train_step, num_episodes, episode_b_success, round(time.time() - self.time_prev, 3)) + "\n")
self.final_ep_rewards.append(episode_b_success)
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
def get_gputhreads(trainers, args, buffer_op, num_env, num_agents, num_adv):
threads = []
sess = tf.compat.v1.get_default_session()
for t in range(args.num_gpu_threads):
input_q = queue.Queue()
output_q = queue.Queue()
if args.policy_grad == "maddpg":
threads.append(MultiTrainTD3(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
elif args.policy_grad == "reinforce":
threads.append(
MultiTrainVPG(input_q, output_q, args=(trainers, args, buffer_op, num_env, sess, num_agents, num_adv)))
threads[t].start()
time.sleep(1)
return threads
def close_gputhreads(threads):
for t in threads:
t.input_queue.put(("None", None, None))
for t in threads:
t.join()
print('GPU trainers cancelled')
return
|
11690
|
import numpy as np
from stable_baselines import PPO2
from stable_baselines.common.policies import CnnPolicy
from stable_baselines.a2c.utils import conv, linear, conv_to_fc
from src.envs import CMDP, FrozenLakeEnvCustomMap
from src.envs.frozen_lake.frozen_maps import MAPS
from src.students import LagrangianStudent, identity_transfer
from src.online_learning import ExponetiatedGradient
from src.teacher import FrozenLakeEvaluationLogger, TeacherFrozenEnv, \
create_intervention, SmallFrozenTeacherEnv
from src.teacher.frozen_lake_env import SmallFrozenTrainingObservation, SmallFrozenNonStationaryBandits
from src.envs.frozen_lake.utils import create_intervention_from_map, \
OptimalAgent, add_teacher
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
__all__ = ['create_teacher_env', 'small_base_cenv_fn']
def constraint(info=None, **kwargs):
return {'g': float(info['next_state_type'] in 'H')}
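# Editorial note: the constraint signal g is 1.0 whenever the next state is a
# hole tile ('H') and 0.0 otherwise; the CMDPs built below bound its average at
# zero (constraints_values=[0], avg_constraint=True), i.e. they penalise any
# probability of falling into a hole.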
def small_base_env_fn():
# Base MDP
world_map = MAPS['small']
not_slipping_prob = 0.8
env_kwargs = dict(desc=world_map,
not_slipping_prob=not_slipping_prob,
base_r_mapping=None,
timeout=200)
return FrozenLakeEnvCustomMap(**env_kwargs)
# Base CMDP
def small_base_cenv_fn():
return CMDP(small_base_env_fn(), constraint,
constraints_values=[0],
n_constraints=1,
avg_constraint=True)
def make_base_small_cenvs():
# Base MDP
world_map = MAPS['small']
# # 2 interventions
# dist = [1, 1]
# tau = [0.1, 0]
# buff_size = [1, 0]
# avg_constraint = [True, True]
# 3 Interventions
dist = [2, 1, 1]
tau = [0.1, 0.1, 0]
buff_size = [1, 1, 0]
avg_constraint = [True, True, True]
interventions = []
for d, t, b, avg in zip(dist, tau, buff_size, avg_constraint):
interventions.append(
create_intervention(
small_base_cenv_fn,
create_intervention_from_map(add_teacher(world_map, d)),
[t], b, use_vec=True, avg_constraint=avg)
)
assert callable(interventions[0])
test_env = create_intervention(
small_base_cenv_fn(), create_intervention_from_map(add_teacher(
world_map)),
[0.0], 0, avg_constraint=True)
return interventions, test_env
############################## TEACHER ENV ###################################
def my_small_cnn(scaled_images, **kwargs):
activ = tf.nn.relu
layer_1 = activ(conv(scaled_images, 'c1', n_filters=32, filter_size=3,
stride=1, **kwargs))
layer_2 = activ(conv(layer_1, 'c2', n_filters=64, filter_size=3,
stride=1, **kwargs))
layer_3 = conv_to_fc(layer_2)
return activ(
linear(layer_3, 'fc1', n_hidden=32, init_scale=np.sqrt(2)))
def create_teacher_env(new_br_kwargs={}, new_online_kwargs={},
original=False, obs_from_training=False,
non_stationary_bandit=False):
# Student definition
br_kwargs = dict(policy=CnnPolicy, verbose=0, n_steps=128,
ent_coef=0.05, cliprange=0.2, learning_rate=1e-3,
noptepochs=9,
policy_kwargs={'cnn_extractor': my_small_cnn})
br_kwargs.update(new_br_kwargs)
# Define online kwargs
online_kwargs = dict(B=0.5, eta=1.0)
online_kwargs.update(new_online_kwargs)
student_cls = LagrangianStudent
n_envs = 4
use_sub_proc_env = False
student_default_kwargs = {'env': None,
'br_algo': PPO2,
'online_algo': ExponetiatedGradient,
'br_kwargs': br_kwargs,
'online_kwargs': online_kwargs,
'lagrangian_ronuds': 2,
'curriculum_transfer': identity_transfer,
'br_uses_vec_env': True,
'use_sub_proc_env': use_sub_proc_env,
'n_envs': n_envs,
}
student_ranges_dict = {}
# Teacher interventions
if original:
# To preserve the teacher env interface while training in the
# original environment, we introduce a dummy intervention
# condition that is always False.
def dummy_intervention(**kwargs):
return 0
_, test_env = make_base_small_cenvs()
intervention = create_intervention(
base_cenv=small_base_cenv_fn,
interventions=[dummy_intervention], taus=[0], buf_size=0,
use_vec=True, avg_constraint=True)
interventions = [intervention]
else:
interventions, test_env = make_base_small_cenvs()
learning_steps = 4800 * 2
time_steps_lim = learning_steps * 10
test_episode_timeout = 200
test_episode_number = 5
if obs_from_training:
env_cls = SmallFrozenTrainingObservation
elif non_stationary_bandit:
env_cls = SmallFrozenNonStationaryBandits
else:
env_cls = SmallFrozenTeacherEnv
return env_cls(student_cls=student_cls,
student_default_kwargs=student_default_kwargs,
interventions=interventions,
final_env=test_env,
logger_cls=FrozenLakeEvaluationLogger,
student_ranges_dict=student_ranges_dict,
learning_steps=learning_steps,
test_episode_number=test_episode_number,
test_episode_timeout=test_episode_timeout,
time_steps_lim=time_steps_lim,
normalize_obs=False)
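# Illustrative usage (assumed call sites, not from this module):
#     teacher_env = create_teacher_env()                           # default intervention-based teacher
#     bandit_env = create_teacher_env(non_stationary_bandit=True)  # SmallFrozenNonStationaryBandits
#     plain_env = create_teacher_env(original=True)                # dummy intervention, base env only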
|
11716
|
import numpy
import pytest
import os
from shutil import rmtree
from numpy.testing import assert_allclose
import scipy.stats
import scipy.integrate
import scipy.special
from fgivenx.mass import PMF, compute_pmf
def gaussian_pmf(y, mu=0, sigma=1):
return scipy.special.erfc(numpy.abs(y-mu)/numpy.sqrt(2)/sigma)
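# Editorial note: for a Gaussian, the set of points less probable than y is
# {x : |x - mu| >= |y - mu|}, whose probability is
#     erfc(|y - mu| / (sqrt(2) * sigma)),
# which is what gaussian_pmf computes and what the sample-based PMF estimates
# in the tests below are compared against.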
def test_gaussian():
numpy.random.seed(0)
nsamp = 5000
samples = numpy.random.randn(nsamp)
y = numpy.random.uniform(-3, 3, 10)
m = PMF(samples, y)
m_ = gaussian_pmf(y)
assert_allclose(m, m_, rtol=3e-1)
def test_PMF():
# Compute samples
numpy.random.seed(0)
nsamp = 100
samples = numpy.concatenate((-5+numpy.random.randn(nsamp//2),
5+numpy.random.randn(nsamp//2)))
# Compute PMF
y = numpy.random.uniform(-10, 10, 10)
m = PMF(samples, y)
# Compute PMF via monte carlo
N = 100000
kernel = scipy.stats.gaussian_kde(samples)
s = kernel.resample(N)[0]
m_ = [sum(kernel(s) <= kernel(y_i))/float(N) for y_i in y]
assert_allclose(m, m_, atol=3*N**-0.5)
# Compute PMF via quadrature
m_ = [scipy.integrate.quad(lambda x: kernel(x)*(kernel(x) <= kernel(y_i)),
-numpy.inf, numpy.inf, limit=500)[0]
for y_i in y]
assert_allclose(m, m_, atol=1e-4)
assert_allclose([0, 0], PMF(samples, [-1e3, 1e3]))
samples = [0, 0]
m = PMF(samples, y)
assert_allclose(m, numpy.zeros_like(y))
def test_compute_pmf():
with pytest.raises(TypeError):
compute_pmf(None, None, wrong_argument=None)
cache = '.test_cache/test'
numpy.random.seed(0)
nsamp = 5000
a, b, e, f = 0, 1, 0, 1
m = numpy.random.normal(a, b, nsamp)
c = numpy.random.normal(e, f, nsamp)
nx = 100
x = numpy.linspace(-1, 1, nx)
fsamps = (numpy.outer(x, m) + c)
ny = 100
y = numpy.linspace(-3, 3, ny)
    assert not os.path.isfile(cache + '_masses.pkl')
    m = compute_pmf(fsamps, y, cache=cache)
    assert os.path.isfile(cache + '_masses.pkl')
m_ = [gaussian_pmf(y, a*xi+e, numpy.sqrt(b**2*xi**2+f**2)) for xi in x]
assert_allclose(m.transpose(), m_, atol=3e-1)
m = compute_pmf(fsamps, y, cache=cache)
assert_allclose(m.transpose(), m_, atol=3e-1)
rmtree('.test_cache')
|
11734
|
import re
import time
from lemoncheesecake.events import TestSessionSetupEndEvent, TestSessionTeardownEndEvent, \
TestEndEvent, SuiteSetupEndEvent, SuiteTeardownEndEvent, SuiteEndEvent, SteppedEvent
from lemoncheesecake.reporting.report import ReportLocation
DEFAULT_REPORT_SAVING_STRATEGY = "at_each_failed_test"
def _is_end_of_result_event(event):
if isinstance(event, TestEndEvent):
return ReportLocation.in_test(event.test)
if isinstance(event, SuiteSetupEndEvent):
return ReportLocation.in_suite_setup(event.suite)
if isinstance(event, SuiteTeardownEndEvent):
return ReportLocation.in_suite_teardown(event.suite)
if isinstance(event, TestSessionSetupEndEvent):
return ReportLocation.in_test_session_setup()
if isinstance(event, TestSessionTeardownEndEvent):
return ReportLocation.in_test_session_teardown()
return None
def save_at_each_suite_strategy(event, _):
return isinstance(event, SuiteEndEvent)
def save_at_each_test_strategy(event, _):
return _is_end_of_result_event(event) is not None
def save_at_each_failed_test_strategy(event, report):
location = _is_end_of_result_event(event)
if location:
result = report.get(location)
return result and result.status == "failed"
else:
return False
def save_at_each_log_strategy(event, _):
return isinstance(event, SteppedEvent)
class SaveAtInterval(object):
def __init__(self, interval):
self.interval = interval
self.last_saving = None
def __call__(self, event, report):
now = time.time()
if self.last_saving:
must_be_saved = now > self.last_saving + self.interval
if must_be_saved:
self.last_saving = now
return must_be_saved
else:
self.last_saving = now # not a saving but an initialization
return False
def make_report_saving_strategy(expression):
# first, try with a static expression
static_expressions = {
"at_end_of_tests": None, # no need to an intermediate report saving in this case
"at_each_suite": save_at_each_suite_strategy,
"at_each_test": save_at_each_test_strategy,
"at_each_failed_test": save_at_each_failed_test_strategy,
"at_each_log": save_at_each_log_strategy,
"at_each_event": save_at_each_log_strategy # deprecated since 1.4.5, "at_each_log" must be used instead
}
try:
return static_expressions[expression]
except KeyError:
pass
# second, try with "every_Ns"
m = re.compile(r"^every[_ ](\d+)s$").match(expression)
if m:
return SaveAtInterval(int(m.group(1)))
# ok... nothing we know about
raise ValueError("Invalid expression '%s' for report saving strategy" % expression)
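# Illustrative usage (assumed, not part of the original module): every valid
# expression resolves to either None or a callable taking (event, report).
#     make_report_saving_strategy("at_end_of_tests")      # -> None (save only at the end)
#     make_report_saving_strategy("at_each_failed_test")  # -> save_at_each_failed_test_strategy
#     make_report_saving_strategy("every_30s")            # -> SaveAtInterval(30)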
|
11750
|
import pyblish.api
import avalon.api
from openpype.api import version_up
from openpype.action import get_errored_plugins_from_data
class IncrementCurrentFile(pyblish.api.InstancePlugin):
"""Increment the current file.
Saves the current scene with an increased version number.
"""
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["houdini"]
families = ["colorbleed.usdrender", "redshift_rop"]
targets = ["local"]
def process(self, instance):
# This should be a ContextPlugin, but this is a workaround
# for a bug in pyblish to run once for a family: issue #250
context = instance.context
key = "__hasRun{}".format(self.__class__.__name__)
if context.data.get(key, False):
return
else:
context.data[key] = True
context = instance.context
errored_plugins = get_errored_plugins_from_data(context)
if any(
plugin.__name__ == "HoudiniSubmitPublishDeadline"
for plugin in errored_plugins
):
raise RuntimeError(
"Skipping incrementing current file because "
"submission to deadline failed."
)
# Filename must not have changed since collecting
host = avalon.api.registered_host()
current_file = host.current_file()
        assert (
            context.data["currentFile"] == current_file
        ), "Current scene file has changed since collecting; not incrementing."
new_filepath = version_up(current_file)
host.save(new_filepath)
|
11773
|
import unittest, tempfile
from pygromos.simulations.hpc_queuing.job_scheduling.schedulers import simulation_scheduler
from pygromos.data.simulation_parameters_templates import template_md
from pygromos.data.topology_templates import blank_topo_template
from pygromos.simulations.hpc_queuing.submission_systems import DUMMY
from pygromos.files.gromos_system.gromos_system import Gromos_System
from pygromos.tests.in_testfiles import in_test_file_path
from pygromos.tests.test_files import out_test_root_dir
class test_MD_scheduler(unittest.TestCase):
submissionSystem = DUMMY
def setUp(self) -> None:
self.tmp_test_dir = tempfile.mkdtemp(dir=out_test_root_dir, prefix="scheduling_Dummy_")
def test_do(self):
in_cnf = in_test_file_path+"/cnf/in_cnf1.cnf"
out_dir_path = self.tmp_test_dir
in_simSystem = Gromos_System(system_name="test_do", work_folder=out_dir_path,
in_top_path=blank_topo_template, in_cnf_path=in_cnf, in_imd_path=template_md,
in_gromosXX_bin_dir=None, in_gromosPP_bin_dir=None)
submission_system = self.submissionSystem()
simulation_scheduler.do(in_simSystem=in_simSystem, out_dir_path=out_dir_path,
submission_system=submission_system,
                                simulation_run_num=2, verbose=True)
|
11785
|
import unittest
import unittest.mock
from programy.storage.entities.nodes import NodesStore
class NodesStoreTest(unittest.TestCase):
def test_load(self):
store = NodesStore()
with self.assertRaises(NotImplementedError):
collector = unittest.mock.Mock()
store.load(collector)
|
11788
|
import unittest
class PrefixNotIncluded(unittest.TestCase):
def test_not_included(self):
pass
if __name__ == '__main__':
unittest.main()
|
11836
|
from django.contrib.auth import get_user_model
from rest_auth.registration.serializers import (
RegisterSerializer as BaseRegisterSerializer,
)
from rest_auth.registration.serializers import (
SocialLoginSerializer as BaseSocialLoginSerializer,
)
from rest_auth.serializers import LoginSerializer as BaseLoginSerializer
from rest_auth.serializers import (
PasswordResetConfirmSerializer as BasePasswordResetConfirmSerializer,
)
from rest_auth.serializers import UserDetailsSerializer as BaseUserDetailsSerializer
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from core.models import Profile
# noinspection PyAbstractClass
class LoginSerializer(BaseLoginSerializer):
"""
Extends the default LoginSerializer in order to return
custom error messages
"""
def validate(self, attrs):
try:
return super().validate(attrs)
except serializers.ValidationError as ex:
ex.detail = "The email or password you entered is incorrect!"
raise ex
# noinspection PyAbstractClass
class PasswordResetConfirmSerializer(BasePasswordResetConfirmSerializer):
"""
Extends the default PasswordResetConfirmSerializer in order to return
custom error messages
"""
def validate(self, attrs):
try:
return super().validate(attrs)
except serializers.ValidationError as ex:
if "new_password2" in ex.detail:
ex.detail = ex.detail["new_password2"][0]
else:
ex.detail = "Could not reset password. Reset token expired or invalid."
raise ex
# noinspection PyAbstractClass
class CustomSocialLoginSerializer(BaseSocialLoginSerializer):
"""
Extends default SocialLoginSerializer to add additional details to some
failed login attempts
"""
def validate(self, attrs):
try:
res = super().validate(attrs)
return res
except ValidationError as ex:
if "User is already registered with this e-mail address." in ex.detail:
ex.detail[0] = (
"User is already registered with this e-mail address. "
"Please login using the form above."
)
raise ex
# noinspection PyAbstractClass
class RegisterSerializer(BaseRegisterSerializer):
email = serializers.EmailField(required=True)
password = serializers.CharField(write_only=True)
first_name = serializers.CharField(write_only=True)
last_name = serializers.CharField(write_only=True)
# legacy compat
zip = serializers.CharField(write_only=True, required=False)
zipcode = serializers.CharField(write_only=True, required=False)
# Overrides the default required password fields
password1 = None
password2 = None
def get_cleaned_data(self):
return {
"username": self.validated_data.get("email", ""),
"email": self.validated_data.get("email", ""),
# allauth uses password1 internally for creation
"password1": self.validated_data.get("password", ""),
"first_name": self.validated_data.get("first_name", ""),
"last_name": self.validated_data.get("last_name", ""),
"zipcode": self.validated_data.get("zipcode", ""),
}
def validate(self, data):
return data
UserModel = get_user_model()
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = "__all__"
class UserDetailsSerializer(BaseUserDetailsSerializer):
profile = ProfileSerializer()
class Meta:
model = UserModel
fields = ("username", "email", "first_name", "last_name", "profile")
read_only_fields = ("email",)
def to_representation(self, instance: UserModel) -> dict:
"""Move fields from Profile to user representation."""
representation = super().to_representation(instance)
profile = representation.pop("profile")
representation["zipcode"] = profile["zipcode"]
representation["is_mentor"] = profile["is_mentor"]
return representation
class UserSerializer(BaseUserDetailsSerializer):
profile = ProfileSerializer()
class Meta:
model = UserModel
fields = ("username", "email", "first_name", "last_name", "profile")
read_only_fields = ("email",)
def to_representation(self, instance: UserModel) -> dict:
"""Move fields from Profile to user representation."""
representation = super().to_representation(instance)
profile = representation.pop("profile")
profile.pop("user")
for key, val in profile.items():
representation[key] = val
return representation
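# representation sketch (added for illustration; the field values below are invented examples):
#   UserDetailsSerializer(user).data
#   -> {"username": "jane", "email": "jane@example.com", "first_name": "Jane",
#       "last_name": "Doe", "zipcode": "12345", "is_mentor": False}
# UserSerializer flattens *all* remaining Profile fields the same way, after popping
# the profile's back-reference to the user.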
|
11838
|
import os
import shutil
import sys
import tarfile
def include_package(envoy_api_protos, rst_file_path, prefix):
# `envoy_api_rst_files` is a list of file paths for .proto.rst files
# generated by protodoc
#
# we are only interested in the proto files generated for envoy protos,
# not for non-envoy dependencies
if ("pkg/" + prefix) not in rst_file_path:
return None
# derive the "canonical" path from the filepath
canonical = f"{rst_file_path.split('pkg/' + prefix)[1]}"
# we are only interested in the actual v3 protos, not their dependencies
if (prefix + canonical) not in envoy_api_protos:
return None
return canonical
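# worked example for include_package (added for illustration; the path is an assumption):
#   rst_file_path = "bazel-out/bin/pkg/envoy/watchdog/v3/abort_action.proto.rst"
#   prefix        = "envoy/"
#   -> canonical  = "watchdog/v3/abort_action.proto.rst"
# and the file is kept only if "envoy/watchdog/v3/abort_action.proto.rst" is listed
# in envoy_api_protos; otherwise include_package returns None.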
def main():
proto_srcs = sys.argv[1]
envoy_api_rst_files = sys.argv[1:-1]
output_filename = sys.argv[-1]
with open(proto_srcs) as f:
# the contents of `proto_srcs` are the result of a bazel genquery,
# containing bazel target rules, eg:
#
# @envoy_api//envoy/watchdog/v3:abort_action.proto
#
# this transforms them to a list with a "canonical" form of:
#
# envoy/watchdog/v3/abort_action.proto.rst
#
envoy_api_protos = [
f"{src.split('//')[1].replace(':', '/')}.rst" for src in f.read().split("\n") if src
]
for rst_file_path in envoy_api_rst_files:
canonical = include_package(envoy_api_protos, rst_file_path, "envoy/")
if canonical is None:
canonical = include_package(envoy_api_protos, rst_file_path, "contrib/envoy/")
if canonical is None:
continue
target = os.path.join("rst-out/api-v3", canonical)
if not os.path.exists(os.path.dirname(target)):
os.makedirs(os.path.dirname(target))
shutil.copy(rst_file_path, target)
# output the generated rst files to a tarfile for consumption
# by other bazel rules
with tarfile.open(output_filename, "w") as tar:
tar.add("rst-out", arcname=".")
if __name__ == "__main__":
main()
|
11866
|
from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_volume_simple_activate
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.ceph.io/ceph/daemon:latest'
fake_id = '42'
fake_uuid = '0c4a7eca-0c2a-4c12-beff-08a80f064c52'
fake_path = '/etc/ceph/osd/{}-{}.json'.format(fake_id, fake_uuid)
class TestCephVolumeSimpleActivateModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'error'
rc = 2
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_all_osds(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_all': True
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--all']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=True)
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_path_exists(self, m_run_command, m_exit_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', '--file', fake_path]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.object(os.path, 'exists', return_value=False)
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_activate_path_not_exists(self, m_fail_json, m_os_path):
ca_test_common.set_module_args({
'path': fake_path
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['msg'] == '{} does not exist'.format(fake_path)
assert result['rc'] == 1
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_without_systemd(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
'systemd': False
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph-volume', '--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid, '--no-systemd']
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_activate_with_container(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'osd_id': fake_id,
'osd_fsid': fake_uuid,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = ''
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_volume_simple_activate.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary,
'run', '--rm', '--privileged',
'--ipc=host', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'-v', '/run/lvm/:/run/lvm/',
'-v', '/run/lock/lvm/:/run/lock/lvm/',
'--entrypoint=ceph-volume', fake_container_image,
'--cluster', fake_cluster, 'simple', 'activate', fake_id, fake_uuid]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
|
11871
|
import System
dataKey, _ = IN
OUT = System.AppDomain.CurrentDomain.GetData("_Dyn_Wireless_%s" % dataKey)
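# note (added for illustration): this reads back whatever object the matching
# "sender" stored in the current AppDomain under the key "_Dyn_Wireless_<dataKey>",
# i.e. the receiving half of a Dynamo wireless data-sharing pair (assumed usage).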
|
11920
|
from typing import Any, Dict, Tuple
import torch
from torch_geometric.nn import GATConv
from torch_sparse import SparseTensor, set_diag
from rgnn_at_scale.aggregation import ROBUST_MEANS
from rgnn_at_scale.models.gcn import GCN
class RGATConv(GATConv):
"""Extension of Pytorch Geometric's `GCNConv` to execute a robust aggregation function:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self, mean='soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
kwargs['in_channels'] = 2 * [kwargs['in_channels']]
super().__init__(**kwargs)
self._mean = ROBUST_MEANS[mean] if mean is not None else None
self._mean_kwargs = mean_kwargs
def forward(self, arguments: Tuple[torch.Tensor, SparseTensor] = None) -> torch.Tensor:
"""Predictions based on the input.
Parameters
----------
arguments : Sequence[torch.Tensor]
[x, edge indices] or [x, edge indices, edge weights], by default None
Returns
-------
torch.Tensor
            the output of `GATConv`.
Raises
------
NotImplementedError
if the arguments are not of length 2 or 3
"""
if len(arguments) == 2:
x, edge_index = arguments
edge_weight = None
elif len(arguments) == 3:
x, edge_index, edge_weight = arguments
else:
raise NotImplementedError("This method is just implemented for two or three arguments")
assert isinstance(edge_index, SparseTensor), 'GAT requires a SparseTensor as input'
assert edge_weight is None, 'The weights must be passed via a SparseTensor'
H, C = self.heads, self.out_channels
assert x.dim() == 2, 'Static graphs not supported in `GATConv`.'
x_l = x_r = self.lin_l(x).view(-1, H, C)
alpha_l = (x_l * self.att_l).sum(dim=-1)
alpha_r = (x_r * self.att_r).sum(dim=-1)
if self.add_self_loops:
edge_index = set_diag(edge_index)
# propagate_type: (x: OptPairTensor, alpha: OptPairTensor)
out = self.propagate(edge_index, x=(x_l, x_r),
alpha=(alpha_l, alpha_r))
alpha = self._alpha * edge_index.storage.value()[:, None]
self._alpha = None
if self.concat:
out = out.view(-1, self.heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
attention_matrix = edge_index.set_value(alpha, layout='coo')
attention_matrix.storage._value = attention_matrix.storage._value.squeeze()
x = self.lin_l(x)
if self._mean is not None:
x = self._mean(attention_matrix, x, **self._mean_kwargs)
else:
x = attention_matrix @ x
x += self.bias
return x
class RGAT(GCN):
"""Generic Reliable Graph Neural Network (RGNN) implementation which currently supports a GCN architecture with the
aggregation functions:
- soft_k_medoid
- soft_medoid (not scalable)
- k_medoid
- medoid (not scalable)
- dimmedian
and with the adjacency preprocessings:
- SVD: <NAME>, <NAME>, <NAME>, and <NAME>. All you need is Low
(rank): Defending against adversarial attacks on graphs.
- GDC: <NAME>, <NAME>, and <NAME>. Diffusion Improves Graph Learning.
- Jaccard: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. Adversarial examples
for graph data: Deep insights into attack and defense.
Parameters
----------
mean : str, optional
The desired mean (see above for the options), by default 'soft_k_medoid'
mean_kwargs : Dict[str, Any], optional
Arguments for the mean, by default dict(k=64, temperature=1.0, with_weight_correction=True)
"""
def __init__(self,
mean: str = 'soft_k_medoid',
mean_kwargs: Dict[str, Any] = dict(k=64, temperature=1.0, with_weight_correction=True),
**kwargs):
self._mean_kwargs = dict(mean_kwargs)
self._mean = mean
super().__init__(**kwargs)
assert not self.do_checkpoint, 'Checkpointing is not supported'
def _build_conv_layer(self, in_channels: int, out_channels: int):
return RGATConv(mean=self._mean, mean_kwargs=self._mean_kwargs,
in_channels=in_channels, out_channels=out_channels)
def _cache_if_option_is_set(self, callback, x, edge_idx, edge_weight):
return SparseTensor.from_edge_index(edge_idx, edge_weight, (x.shape[0], x.shape[0])), None
|
11957
|
import os
from pathlib import Path
import requests
import shutil
import sys
from distutils.version import LooseVersion
import time
from tqdm import tqdm
from docly.parser import parser as py_parser
from docly.tokenizers import tokenize_code_string
from docly import __version__
# from c2nl.objects import Code
UPDATE_CHECK_URL = "http://3.80.2.138:8584/vercheck/check-version/"
# UPDATE_CHECK_URL = "http://127.0.0.1:5000/vercheck/check-version/"
interaction_cache = lambda : Path(Path.home() / ".docly" / "interaction_cache")
CACHE_DIR = (Path().home() / ".docly" / "file_cache")
cache_exists = lambda : CACHE_DIR.exists()
make_cache_dir = lambda : os.mkdir(str(CACHE_DIR))
def _compare_installed_version_with_latest(v1, v2):
try:
current_version = LooseVersion(v1)
latest_version = LooseVersion(v2)
assert current_version == latest_version
return True
except AssertionError:
return False
def look_for_update():
with requests.sessions.Session() as s:
try:
r = s.get(UPDATE_CHECK_URL, timeout=2)
r.raise_for_status()
if not _compare_installed_version_with_latest(__version__, r.text):
i_c = interaction_cache()
return True
return False
except Exception:
i_c = interaction_cache()
if not i_c.exists():
os.mkdir(i_c)
if not (i_c / "icache.txt").exists():
with open((i_c / "icache.txt"), "w") as f:
f.write(str(int(time.time())) + "\n")
else:
with open((i_c / "icache.txt"), "a") as f:
f.write(str(int(time.time())) + "\n")
return False
def is_dir(base_path):
if isinstance(base_path, Path):
return base_path.is_dir()
elif isinstance(base_path, str):
return Path(base_path).is_dir()
else:
return False
def is_python_file(file_path):
if isinstance(file_path, Path):
return file_path.suffix == ".py"
elif isinstance(file_path, str):
return Path(file_path).suffix == ".py"
else:
return False
def is_ipynb_notebook(file_path):
if isinstance(file_path, Path):
return file_path.suffix == ".ipynb"
elif isinstance(file_path, str):
return Path(file_path).suffix == ".ipynb"
else:
return False
def download_from_url(url, dst):
"""
@param: url to download file
@param: dst place to put the file
"""
file_size = int(requests.head(url).headers["Content-Length"])
if os.path.exists(dst):
first_byte = os.path.getsize(dst)
else:
first_byte = 0
if first_byte >= file_size:
return file_size
header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
pbar = tqdm(
total=file_size, initial=first_byte,
unit='B', unit_scale=True, desc=dst.split('/')[-1])
req = requests.get(url, headers=header, stream=True)
with(open(dst, 'ab')) as f:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(1024)
pbar.close()
return file_size
def check_out_path(target_path: Path):
""""
This function recursively yields all contents of a pathlib.Path object
"""
yield target_path
for file in target_path.iterdir():
if file.is_dir():
yield from check_out_path(file)
else:
yield file.absolute()
def process_file(file_path: Path, ts_lib_path: str, use_old=False):
result, parser_obj = py_parser.parse(file_path, ts_lib_path)
func_and_params = parser_obj.get_all_function_names_with_params()
if result:
for func_name, data in py_parser.get_func_body_and_docstr(parser_obj):
# print(py_toeknizer.tokenize_code_string(func_body))
# code.tokens = tokenizer.tokenize(func_body).data
# code.text = func_body
(func_body, docstr), start, end = data
ret_start = (start[0]+1, start[1])
params = func_and_params[func_name]
code_str = [tokenize_code_string(func_body)] if use_old else func_body
yield code_str, params, ret_start, func_name, docstr.strip()
def query_yes_no(question, default="yes"):
"""Ask a yes/no question and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes", "no", or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '{}}'".format(default))
while True:
print(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
print("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
|
12008
|
import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
class Trainer(object):
def __init__(self, cfg):
self.cfg = cfg
# Define Saver
self.saver = Saver(cfg)
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.cfg["log_tb_dir"])
self.summary.create_summary()
# Define Dataloader
kwargs = {"num_workers": cfg["num_workers"], "pin_memory": True}
self.train_loader, self.val_loader, self.test_loader, dset = make_data_loader(
cfg, **kwargs)
# read landmark centers
self.id2center = np.array(json.load(
open(osp.join(cfg["data_dir"], "id2centers.json")))).astype(np.float64)
self.coding_book = torch.zeros(
(len(self.id2center), cfg["seg_channel"]), dtype=torch.float32).cuda()
torch.nn.init.xavier_uniform(self.coding_book)
print("coding book size = {}".format(self.coding_book.shape))
# generate color map
unique_label = np.arange(len(self.id2center))
unique_label = unique_label.astype(
np.int64) * 6364136223846793005 + 1442695040888963407
color_map = np.zeros((unique_label.shape[0], 3), np.uint8)
color_map[:, 0] = np.bitwise_and(unique_label, 0xff)
color_map[:, 1] = np.bitwise_and(np.right_shift(unique_label, 4), 0xff)
color_map[:, 2] = np.bitwise_and(np.right_shift(unique_label, 8), 0xff)
self.color_map = np.array(color_map)
self.coding_book = Variable(self.coding_book, requires_grad=True)
# Define network
model = VSNet(backbone=cfg["backbone"],
seg_decoder=cfg["seg_decoder"],
vertex_decoder=cfg["vertex_decoder"],
seg_channel=cfg["seg_channel"],
vertex_channel=cfg["vertex_channel"],
output_stride=cfg["out_stride"],
sync_bn=cfg["sync_bn"])
train_params = [{"params": model.get_1x_lr_params(), "lr": cfg["lr"]},
{"params": model.get_10x_lr_params(),
"lr": cfg["lr"] * 10},
{"params": self.coding_book, "lr": cfg["lr"] * 10}
]
# Define Optimizer
if cfg["optimizer"] == "SGD":
optimizer = torch.optim.SGD(train_params, momentum=cfg["momentum"],
weight_decay=cfg["weight_decay"], nesterov=cfg["nesterov"])
elif cfg["optimizer"] == "Adam":
optimizer = torch.optim.Adam(train_params, lr=cfg["lr"],
weight_decay=cfg["weight_decay"], amsgrad=True)
else:
raise NotImplementedError
# Define Criterion
self.seg_criterion = loss_dict[cfg["seg_loss_type"]]
self.vertex_criterion = loss_dict[cfg["vertex_loss_type"]]
self.model, self.optimizer = model, optimizer
# Define Evaluator
self.evaluator = Evaluator(
self.coding_book.shape[0], cfg["vertex_channel"])
# Define lr scheduler
self.scheduler = LR_Scheduler(mode=cfg["lr_scheduler"], base_lr=cfg["lr"],
num_epochs=cfg["epochs"], iters_per_epoch=len(
self.train_loader),
lr_step=cfg["lr_step"])
self.model = torch.nn.DataParallel(self.model)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = {"mIoU": 0.0, "Acc": 0.0, "Acc": 0.0,
"FWIoU": 0.0, "translation_median": 1000}
if cfg["resume"] is not None and cfg["resume"] == True:
print(os.path.isfile(cfg["resume_checkpoint"]))
if not os.path.isfile(cfg["resume_checkpoint"]):
raise RuntimeError("=> no checkpoint found at {}" .format(
cfg["resume_checkpoint"]))
checkpoint = torch.load(cfg["resume_checkpoint"])
cfg.opt["start_epoch"] = checkpoint["epoch"] - 1
self.model.module.load_state_dict(checkpoint["state_dict"])
if not cfg["ft"]:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.best_pred = checkpoint["best_pred"]
if "coding_book" in checkpoint.keys():
assert self.coding_book.shape == checkpoint["coding_book"].shape
self.coding_book = checkpoint["coding_book"]
else:
print("Alert! coding book does not exist in the checkpoint")
print("=> loaded checkpoint {} (epoch {})"
.format(cfg["resume"], checkpoint["epoch"]))
def validation(self, epoch):
print("=================================")
print("validation")
print("=================================")
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc="\r")
num_iter_val = len(self.val_loader)
test_loss = 0.0
num_images = 0
ten_count = []
five_count = []
three_count = []
one_count = []
translation_list = []
angular_list = []
reproject_list = []
test_seg_loss = 0.0
test_ver_loss = 0.0
for i, data in enumerate(tbar):
image, seg_target, vertex_target = [d.cuda() for d in data[:3]]
valid_mask = data[-1].cuda()
pose_target, camera_k_matrix, ori_img = data[3:]
seg_target = seg_target.long()
valid_mask = (seg_target.detach() > 0).float()
with torch.no_grad():
seg_pred, vertex_pred, seg_pred_x4s = self.model(
image)
loss_seg = 0
if self.cfg["seg_decoder"]:
loss_seg = self.seg_criterion(seg_pred, seg_target, self.coding_book,
margin=self.cfg["seg_loss_margin"],
seg_k=self.cfg["seg_k"],
valid_mask=valid_mask)
test_seg_loss += loss_seg.item()
self.summary.add_scalar(
"val/loss_seg_iter", loss_seg.item(), i + num_iter_val * epoch)
loss_vertex = 0
if self.cfg["vertex_decoder"]:
loss_vertex = self.vertex_criterion(vertex_pred, vertex_target,
valid_mask)
test_ver_loss += loss_vertex.item()
self.summary.add_scalar(
"val/loss_vertex_iter", loss_vertex.item(), i + num_iter_val * epoch)
loss = 0
if self.cfg["seg_decoder"]:
loss += loss_seg
if self.cfg["vertex_decoder"]:
loss += loss_vertex * self.cfg["vertex_loss_ratio"]
test_loss += loss.item()
tbar.set_description("Test loss: %.9f" % (test_loss / (i + 1)))
self.summary.add_scalar(
"val/total_loss_iter", loss.item(), i + num_iter_val * epoch)
global_step = i * \
self.cfg["val_batch_size"] + image.data.shape[0]
# evaluate seg_pred
seg_target = seg_target.detach().squeeze()
if self.cfg["seg_decoder"]:
seg_pred, knn = utils.evaluate_segmentation(seg_pred_x4s,
self.coding_book, seg_target.size(), self.cfg["use_own_nn"])
else:
seg_pred = seg_target
# evaluate vertex
pt3d_filter, pt2d_filter, _ = utils.evaluate_vertex_v2(vertex_pred, seg_pred,
self.id2center, inlier_thresh=0.999,
min_mask_num=self.cfg["val_label_filter_threshsold"])
# pt3d_filter, pt2d_filter = utils.evaluate_vertex(vertex_target, seg_pred, self.id2center)
camera_k_matrix = camera_k_matrix.squeeze().numpy()
translation_distance, angular_distance, error = 1e9, 1e9, 1e9
if pt2d_filter.shape[0] > 6:
# pnp
ret, pose_pred = utils.pnp(
pt3d_filter, pt2d_filter, camera_k_matrix)
error = utils.reproject_error(
pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)
translation_distance, angular_distance = utils.cm_degree_metric(
pose_pred, pose_target)
print(translation_distance, angular_distance, error, i)
ten_count.append(translation_distance <
10 and angular_distance < 10)
five_count.append(translation_distance <
5 and angular_distance < 5)
three_count.append(translation_distance <
3 and angular_distance < 3)
one_count.append(translation_distance <
1 and angular_distance < 1)
translation_list.append(translation_distance)
angular_list.append(angular_distance)
reproject_list.append(error)
# Add batch sample into evaluator
if self.cfg["seg_decoder"]:
self.evaluator.add_seg_batch(seg_target, seg_pred)
if self.cfg["visualize_segmenation"]:
self.summary.visualize_seg_image(ori_img, seg_pred, seg_target,
epoch, i, global_step, self.color_map)
if self.cfg["vertex_decoder"]:
# evaluate vertex_pred
vertex_target, vertex_pred = vertex_target.squeeze(), vertex_pred.squeeze()
self.evaluator.add_vertex_batch(vertex_target, vertex_pred)
                    # compute the vertex accuracy
if self.cfg["visualize_voting"]:
if self.cfg["visualize_landmark"] != None and self.cfg["visualize_landmark"]:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step, pt2d_filter, True)
else:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step)
mIoU, Acc, Acc_class, FWIoU = self.summary.visualize_seg_evaluator(
self.evaluator, epoch, "val/seg/")
print("Validation:")
print("[Epoch: %d, numImages: %5d]" % (epoch, num_images))
print("Loss: %.9f" % (test_loss / num_iter_val))
self.summary.add_scalar("val/total_loss_epoch",
test_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_seg_epoch",
test_seg_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_ver_epoch",
test_ver_loss / num_iter_val, epoch)
self.summary.add_scalar("val/pnp/10cm_epoch",
np.mean(ten_count), epoch)
self.summary.add_scalar("val/pnp/5cm_epoch",
np.mean(five_count), epoch)
self.summary.add_scalar("val/pnp/3cm_epoch",
np.mean(three_count), epoch)
self.summary.add_scalar("val/pnp/1cm_epoch", np.mean(one_count), epoch)
self.summary.add_scalar(
"val/pnp/translation_median_epoch", np.median(translation_list), epoch)
self.summary.add_scalar(
"val/pnp/angular_median_epoch", np.median(angular_list), epoch)
new_pred = {"mIoU": mIoU.item(), "Acc": Acc.item(), "Acc_class": Acc_class.item(), "FWIoU": FWIoU.item(),
"10cm": np.mean(ten_count),
"5cm": np.mean(five_count), "3cm": np.mean(three_count), "1cm": np.mean(one_count),
"translation_median": np.median(translation_list), "angular_list": np.median(angular_list)}
print(new_pred)
if new_pred["translation_median"] < self.best_pred["translation_median"]:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
"epoch": epoch + 1,
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"best_pred": self.best_pred,
"coding_book": self.coding_book
}, is_best, save_model=self.cfg["save_model"])
def main():
parser = argparse.ArgumentParser(
description="PyTorch Landmark Segmentation Training")
parser.add_argument("--dataset", type=str,
choices=["7scenes_loc", "cambridge_loc"], help="experiment config file")
parser.add_argument("--scene", type=str, default="",
help="experiment scene")
parser.add_argument("--gpu-id", type=str, default="",
help="experiment gpu id")
parser.add_argument("--use-aug", type=str, default="",
choices=["", "true", "false"], help="experiment use aug")
parser.add_argument("--config", type=str, default=None,
help="experiment config file")
parser.add_argument("--debug", type=str, default="",
choices=["", "true", "false"], help="debug")
parser.add_argument("--resume", type=str, default="true",
choices=["", "true", "false"], help="resume")
args = parser.parse_args()
debug = None
if args.debug != "":
debug = (args.debug == "true")
if args.dataset == "7scenes_loc":
cfg = SevenScenesLocConfig(args.config, debug)
elif args.dataset == "cambridge_loc":
cfg = CambridgeLocConfig(args.config, debug)
if args.scene != "":
cfg.opt["scene"] = args.scene
if args.gpu_id != "":
cfg.opt["devices"] = args.gpu_id
if args.use_aug == "true":
cfg.opt["use_aug"] = True
if args.resume == "true":
cfg.opt["resume"] = True
cfg.opt["resume_checkpoint"] = cfg["export_dir"] + \
'/ckpts/checkpoint-backup.pth.tar'
cfg.print_opt()
cfg.set_environmental_variables()
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed(cfg["seed"])
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
trainer = Trainer(cfg)
print("Starting Epoch:", trainer.cfg["start_epoch"])
print("Total Epoches:", trainer.cfg["epochs"])
trainer.validation(trainer.cfg["start_epoch"])
trainer.summary.close()
if __name__ == "__main__":
main()
|
12009
|
import numpy as np
import gzip
import pickle
import os
import urllib.request
class MNIST:
host = 'http://yann.lecun.com/exdb/mnist/'
filenames = {
'train': ('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz'),
'test': ('t10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'),
}
dataset_filename = 'mnist.pkl.gz'
train_samples = 50000
validation_samples = 10000
test_samples = 10000
def __init__(self):
self.current_dir = os.path.dirname(__file__)
if not self.is_dataset_available():
            print('Dataset not available! It will be downloaded and decoded, which can take a while, please wait!')
datasets = self.get_base_datasets_filenames()
for dataset in datasets:
if not self.is_base_dataset_downloaded(dataset):
print(f'Downloading {dataset}...')
self.download_dataset(dataset)
print('Decoding files and saving it...')
self.decode_and_save()
print('Deleting base files (downloaded)...')
for dataset in datasets:
self.delete_dataset(dataset)
print('Done.')
def is_dataset_available(self):
return os.path.exists(os.path.join(self.current_dir, self.dataset_filename))
def get_base_datasets_filenames(self):
return self.filenames['train'] + self.filenames['test']
def is_base_dataset_downloaded(self, filename):
return os.path.exists(os.path.join(self.current_dir, filename))
def download_dataset(self, filename):
url = self.host + filename
dest = os.path.join(self.current_dir, filename)
urllib.request.urlretrieve(url, dest)
def delete_dataset(self, filename):
os.remove(os.path.join(self.current_dir, filename))
def decode_and_save(self):
data = {}
for key, (images_filename, labels_filename) in self.filenames.items():
with gzip.open(os.path.join(self.current_dir, images_filename), 'rb') as file:
images = np.frombuffer(file.read(), np.uint8, offset=16).reshape(-1, 28*28)
with gzip.open(os.path.join(self.current_dir, labels_filename), 'rb') as file:
labels = np.frombuffer(file.read(), np.uint8, offset=8)
data[key] = (images, labels)
training = tuple(x[:self.train_samples] for x in data['train'])
validation = tuple(x[self.train_samples:] for x in data['train'])
test = tuple(data['test'])
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'wb') as file:
pickle.dump((training, validation, test), file)
def load(self):
with gzip.open(os.path.join(self.current_dir, self.dataset_filename), 'rb') as file:
training, validation, test = pickle.load(file)
return training, validation, test
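# mini usage sketch (added for illustration): the constructor downloads and decodes
# the raw files on first use; afterwards load() simply reads the pickled cache.
if __name__ == "__main__":
    mnist = MNIST()
    (train_x, train_y), (val_x, val_y), (test_x, test_y) = mnist.load()
    print(train_x.shape, val_x.shape, test_x.shape)  # (50000, 784) (10000, 784) (10000, 784)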
|
12038
|
from amitools.binfmt.BinImage import *
from .ELFFile import *
from .ELF import *
from .ELFReader import ELFReader
from .DwarfDebugLine import DwarfDebugLine
class BinFmtELF:
"""Handle Amiga m68k binaries in ELF format (usually from AROS)"""
def is_image(self, path):
"""check if a given file is a supported ELF file"""
with open(path, "rb") as f:
return self.is_image_fobj(f)
def is_image_fobj(self, fobj):
"""check if a given fobj is a supported ELF file"""
try:
pos = fobj.tell()
# read identifier
ident = ELFIdentifier()
ident_data = fobj.read(16)
ident.parse(ident_data)
# read header
hdr = ELFHeader()
hdr_data = fobj.read(36)
hdr.parse(hdr_data)
# seek back
fobj.seek(pos, 0)
# check header
return self.is_supported_elf(ident, hdr)
except ELFParseError:
return False
def is_supported_elf(self, ident, hdr):
"""check ELF header if its a m68k binary"""
if hdr.machine != EM_68K:
return False
if ident.osabi not in (ELFOSABI_SYSV, ELFOSABI_AROS):
return False
return True
def load_image(self, path):
"""load a BinImage from an ELF file given via path"""
with open(path, "rb") as f:
return self.load_image_fobj(f)
def load_image_fobj(self, fobj):
"""load a BinImage from an ELF file given via file object"""
# read elf file
reader = ELFReader()
elf = reader.load(fobj)
# create bin image and assign elf file
bi = BinImage(BIN_IMAGE_TYPE_ELF)
bi.set_file_data(elf)
# walk through elf sections
sect_to_seg = {}
for sect in elf.sections:
# determine segment type
seg_type = None
name = sect.name_str
flags = 0
if name == b".text":
seg_type = SEGMENT_TYPE_CODE
elif name == b".data":
seg_type = SEGMENT_TYPE_DATA
elif name == b".rodata":
seg_type = SEGMENT_TYPE_DATA
flags = SEGMENT_FLAG_READ_ONLY
elif name == b".bss":
seg_type = SEGMENT_TYPE_BSS
# we got a segment
if seg_type is not None:
size = sect.header.size
data = sect.data
seg = Segment(seg_type, size, data, flags)
bi.add_segment(seg)
# assign section to segment
seg.set_file_data(sect)
sect_to_seg[sect] = seg
# now run through segments to add relocations
bi_segs = bi.get_segments()
for seg in bi_segs:
# retrieve associated ELF section
sect = seg.get_file_data()
# any relocations?
rela = sect.get_rela()
num_rela = len(rela)
if num_rela > 0:
self.add_elf_rela(sect, seg, sect_to_seg)
# any symbols?
symbols = sect.get_symbols()
num_syms = len(symbols)
if num_syms > 0:
self.add_elf_symbols(symbols, seg)
# try to add debug info
ddl = DwarfDebugLine()
got = ddl.decode(elf)
if got:
self.add_debug_line(ddl, bi, sect_to_seg)
return bi
def add_elf_rela(self, sect, seg, sect_to_seg):
for tgt_sect in sect.get_rela_sections():
# is this a relocation to a used section?
if tgt_sect in sect_to_seg:
to_seg = sect_to_seg[tgt_sect]
rl = Relocations(to_seg)
seg.add_reloc(to_seg, rl)
# add relocations
for rel in sect.get_rela_by_section(tgt_sect):
r = Reloc(rel.offset, addend=rel.section_addend)
rl.add_reloc(r)
def add_elf_symbols(self, symbols, seg):
symtab = SymbolTable()
seg.set_symtab(symtab)
for sym in symbols:
# add entry
off = sym.value
name = sym.name_str
file_sym = sym.file_sym
if file_sym is not None:
file_name = file_sym.name_str
else:
file_name = None
symbol = Symbol(off, name, file_name)
symtab.add_symbol(symbol)
def add_debug_line(self, ddl, bi, sect_to_seg):
seg_to_dl = {}
matrix = ddl.get_matrix()
for row in matrix:
sect = row.section
if sect in sect_to_seg:
segment = sect_to_seg[sect]
# fetch debug info
if segment in seg_to_dl:
dl, file_to_df = seg_to_dl[segment]
else:
dl = DebugLine()
file_to_df = {}
segment.set_debug_line(dl)
seg_to_dl[segment] = (dl, file_to_df)
# fetch file instance
fid = row.file
if fid in file_to_df:
df = file_to_df[fid]
else:
df = DebugLineFile(ddl.get_file_name(fid), ddl.get_file_dir(fid))
dl.add_file(df)
file_to_df[fid] = df
# add entry
e = DebugLineEntry(row.address, row.line)
df.add_entry(e)
# mini test
if __name__ == "__main__":
import sys
bf = BinFmtELF()
for a in sys.argv[1:]:
if bf.is_image(a):
print("loading", a)
bi = bf.load_image(a)
print(bi)
else:
print("NO ELF:", a)
|
12049
|
import tvm
import sys
import time
import numpy as np
from tvm.tensor_graph.testing.models import resnet
from tvm.tensor_graph.core import ForwardGraph, BackwardGraph, compute, \
GraphTensor, GraphOp, PyTIRGraph
from tvm.tensor_graph.nn import CELoss, SGD
from tvm.tensor_graph.core.schedule_generator import ConnectedSet, GPUScheduleBaseSet, \
GPUScheduleMasterBaseSet, form_connected_sets, GPUScheduleMasterSet, \
SingleCut, form_cut_candidates, LayoutTransform
from tvm.tensor_graph.core.utils import flatten_tir_graph
from tvm.tensor_graph.core.space import PrimitiveSpace, PartitionSpace, ForwardGraphSpace
from tvm.tensor_graph.core.tuner import RandomPrimitiveTuner, RandomPartitionTuner, RandomForwardTuner
from tvm.tensor_graph.core.scheduler import PrimitiveScheduler as Scheduler
from tvm.tensor_graph.core.scheduler import schedule_all
from tvm.tensor_graph.core.build_graph import build_all
from tvm.tensor_graph.core.runtime import run_all
def test1():
print("test 1 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
begin = time.time()
# change data layout
forward_space = ForwardGraphSpace()
forward_tuner = RandomForwardTuner(forward_space)
layout_generator = LayoutTransform(fwd_graph, forward_space, forward_tuner)
fgraph = layout_generator.generate()
after_layout = time.time()
# autodiff
bgraph = fgraph.make_backward(ce_loss, sgd)
after_autodiff = time.time()
# make tir graph
inputs = [x.tvm_tensor for x in bgraph.inputs]
weights = [x.tvm_tensor for x in bgraph.weights]
outputs = [x.tvm_tensor for x in bgraph.outputs]
# labels = [x.tvm_tensor for x in bgraph.labels]
# loss = bgraph.loss.tvm_tensor
# gradients = [x.tvm_tensor for x in bgraph.gradients]
# updates = [x.tvm_tensor for x in bgraph.updates]
labels = []
loss = None
gradients = []
lr = None
updates = []
tgraph = PyTIRGraph(
inputs,
labels,
outputs,
weights,
loss,
gradients,
lr,
updates)
after_tir_graph = time.time()
# subgraph partition
partition_space = PartitionSpace()
partition_tuner = RandomPartitionTuner(partition_space)
cut_candidates = form_cut_candidates(tgraph)
# print(cut_candidates)
for i, candidate in enumerate(cut_candidates):
name = "graph_cut_" + str(i)
partition_generator = SingleCut(tgraph, name, candidate, partition_space, partition_tuner)
partition_generator.generate()
# for op, stat in tgraph.op_stat_dict.items():
# print(op, " head=", stat.head)
tgraph.partition_graph()
after_partition = time.time()
print("num subgraphs:", len(tgraph.subgraphs))
target = "cuda"
dev = 0
# update the op stat dict of subgraphs
# do auto-schedule
total_build_trials = 0
build_time_record = []
for mark, subgraph in tgraph.subgraphs.items():
# print("subgraph", mark)
tensors = list(subgraph.outputs.keys()) + list(subgraph.loss.keys()) \
+ list(subgraph.gradients.keys()) + list(subgraph.updates.keys())
ops = [x.op for x in tensors]
op_list, down_graph = flatten_tir_graph(ops, output_first=True)
op_stat_dict = {}
for op in op_list:
v = tgraph.op_map[op]
if v in tgraph.op_stat_dict:
op_stat_dict[op] = tgraph.op_stat_dict[v]
c_list = form_connected_sets(subgraph, op_stat_dict, tensors, ops, down_graph)
# print("c_list_length=", len(c_list))
# print("check connected set")
# for connected_set in c_list:
# print(connected_set)
scheduler = Scheduler()
# sch = tgraph.schedules[mark]
for i, connected_set in enumerate(c_list):
name = "subgraph_" + str(mark) + "_connect_" + str(i)
assert not connected_set.empty()
build_success = False
for trial in range(10):
total_build_trials += 1
tgraph.create_schedule_for(mark=mark)
sch = tgraph.schedules[mark]
if connected_set.has_master():
if connected_set.iso_base():
PrimitiveScheduler = GPUScheduleMasterBaseSet
else:
PrimitiveScheduler = GPUScheduleMasterSet
primitive_generator = PrimitiveScheduler(
name, subgraph, connected_set, down_graph, op_stat_dict, scheduler)
else:
PrimitiveScheduler = GPUScheduleBaseSet
primitive_generator = PrimitiveScheduler(
name, connected_set, scheduler)
primitive_generator.generate(sch)
# try:
# print(tvm.lower(sch, tgraph.bufs[mark], simple_mode=True))
# except Exception as e:
# print(e)
# print("prologue")
# for p in connected_set.prologue:
# print(p.body)
# print("epilogue")
# for e in connected_set.epilogue:
# print(e.body)
# print("base")
# print(connected_set.base.body)
# print("master")
# print(connected_set.master.body)
# print(connected_set.master.input_tensors)
# for op, master in connected_set.prologue.items():
# in_input = False
# for inp in master.input_tensors:
# if op == inp.op:
# in_input = True
# break
# if not in_input:
# print(op, "not in the inputs of", master)
build_beg = time.time()
build_success = tgraph.build_for(target, mark=mark)
build_end = time.time()
build_time_record.append(build_end - build_beg)
if build_success:
break
if not build_success:
raise RuntimeError("Can't build for subgraph", mark)
after_schedule = time.time()
tgraph.set_inputs({bgraph.inputs[0].tvm_tensor: img_np})
# tgraph.set_labels({bgraph.labels[0].tvm_tensor: label_np})
# tgraph.set_lr(optimize_engine.get_lr())
tgraph.allocate_buffer(target, dev)
beg = time.time()
for mark in tgraph.call_order:
func = tgraph.functions[mark]
bufs = tgraph.bufs[mark]
real_bufs = [tgraph.tvm_array_dict[tgraph.subgraphs[mark].index[x]] for x in bufs]
func_beg = time.time()
func(*real_bufs)
func_end = time.time()
print((func_end - func_beg) * 1e3, "ms")
end = time.time()
print("End to end time:", (end - beg) * 1e3, "ms")
print("total build trails=", total_build_trials)
print("layout change time cost=", (after_layout - begin) * 1e3, "ms")
print("autodiff time cost=", (after_autodiff - after_layout) * 1e3, "ms")
print("make tir_graph time cost=", (after_tir_graph - after_autodiff) * 1e3, "ms")
print("subgraph partition time cost=", (after_partition - after_tir_graph) * 1e3, "ms")
print("schedule time cost=", (after_schedule - after_partition) * 1e3, "ms. average=",
(after_schedule - after_partition) * 1e3 / total_build_trials, "ms")
print("average build time cost=", np.array(build_time_record).mean() * 1e3, "ms")
print("total build time cost=", (after_schedule - begin) * 1e3, "ms")
print("Success!")
def test2(file=sys.stdout):
print("test 2 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph, loss=ce_loss, optimizer=sgd, inference=False)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())), file=file)
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())), file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
for k, v in tir_graph.op_map.items():
print(k.name, v.name, file=file)
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
tmp = {}
for f in set(tir_graph.op_feature_dict.values()):
if f.split(")")[-1] not in tmp:
tmp[f.split(")")[-1]] = []
tmp[f.split(")")[-1]].append(f)
print("different kinds of ops:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
print("####################################################", file=file)
tmp = {}
for f in set(tir_graph.subgraph_features.values()):
key = ";".join([x.split(")")[-1] for x in f.split(";")])
if key not in tmp:
tmp[key] = []
tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp), file=file)
for k, v in tmp.items():
print(k, file=file)
for vv in v:
print(" ", vv, file=file)
for k, v in tir_graph.subgraph_features.items():
key = ";".join([x.split(")")[-1] for x in v.split(";")])
if key == "collect_3_dim4;grad_bn2d_to_conv2d_nchw_8;grad_bn2d_var_to_conv2d_nchw_10;grad_bn2d_mean_to_conv2d_nchw_2;collect_2_dim1":
i = 1
for op in tir_graph.subgraphs[k].op_list:
print(i, ". #####")
i += 1
print(op.body)
print(op.input_tensors)
break
# target = "cuda"
# dev = 0
# print("begin schedule")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], [label_np], sgd.get_lr(), target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success", file=file)
def test3():
print("test 3 ##############################")
batch = 64
img_shape = [batch, 3, 224, 224]
num_classes = 1000
label_shape = [batch, num_classes]
dtype = "float32"
model = resnet.resnet50(num_classes=1000)
img_tensor = GraphTensor(img_shape, dtype=dtype, name="image")
label_tensor = GraphTensor(label_shape, dtype=dtype, name="label")
# get output_tensor
output_tensor = model(img_tensor)
# get the weights tensors
weights_tensors = []
for w in model.weights():
weights_tensors.append(w)
# this is data
img_np = np.random.uniform(-1, 1, img_shape).astype(dtype)
label_np = np.random.uniform(-1, 1, [batch, num_classes]).astype(dtype)
ce_loss = CELoss(label_tensor)
sgd = SGD(0.002)
fwd_graph = ForwardGraph([img_tensor], [output_tensor], weights_tensors)
tir_graph = schedule_all(fwd_graph)
print(len(tir_graph.subgraphs))
print("different subgraphs:", len(set(tir_graph.subgraph_features.values())))
print("direrent ops:", len(set(tir_graph.op_feature_dict.values())))
tmp = {}
# for f in set(tir_graph.op_feature_dict.values()):
# if f.split(")")[-1] not in tmp:
# tmp[f.split(")")[-1]] = []
# tmp[f.split(")")[-1]].append(f)
# for k, v in tmp.items():
# print(k)
# for vv in v:
# print(" ", vv)
print("####################################################")
tmp = {}
# for f in set(tir_graph.subgraph_features.values()):
# key = ";".join([x.split(")")[-1] for x in f.split(";")])
# if key not in tmp:
# tmp[key] = []
# tmp[key].append(f)
print("different kinds of subgraphs:", len(tmp))
for k, v in tmp.items():
print(k)
for vv in v:
print(" ", vv)
# target = "cuda"
# dev = 1
# print("begin build")
# beg_build = time.time()
# build_all(fwd_graph, tir_graph, target=target, build_trial=10)
# end_build = time.time()
# print("num functions:", len(tir_graph.shared_functions))
# print("build time cost=", (end_build - beg_build) * 1e3, "ms")
# try:
# run_all(tir_graph, [img_np], target=target, dev=dev)
# except Exception as e:
# print("run error:", e)
print("Success")
if __name__ == "__main__":
with open("trace_resnet_subgraph.log", "w") as fout:
test2(file=fout)
# test3()
|
12058
|
import sys
from typing import Generator
from typing import List
from typing import Optional
import pytest
from _pytest.pytester import Pytester
def test_one_dir_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub\n")
result = pytester.runpytest("test_foo.py")
assert result.ret == 0
result.assert_outcomes(passed=1)
def test_two_dirs_pythonpath(pytester: Pytester, file_structure) -> None:
pytester.makefile(".ini", pytest="[pytest]\npythonpath=sub sub2\n")
result = pytester.runpytest("test_foo.py", "test_bar.py")
assert result.ret == 0
result.assert_outcomes(passed=2)
def test_unconfigure_unadded_dir_pythonpath(pytester: Pytester) -> None:
pytester.makeconftest(
"""
def pytest_configure(config):
config.addinivalue_line("pythonpath", "sub")
"""
)
pytester.makepyfile(
"""
import sys
def test_something():
pass
"""
)
result = pytester.runpytest()
result.assert_outcomes(passed=1)
def test_clean_up_pythonpath(pytester: Pytester) -> None:
"""Test that the srcpaths plugin cleans up after itself."""
pytester.makefile(".ini", pytest="[pytest]\npythonpath=I_SHALL_BE_REMOVED\n")
pytester.makepyfile(test_foo="""def test_foo(): pass""")
before: Optional[List[str]] = None
after: Optional[List[str]] = None
class Plugin:
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_unconfigure(self) -> Generator[None, None, None]:
nonlocal before, after
before = sys.path.copy()
yield
after = sys.path.copy()
result = pytester.runpytest_inprocess(plugins=[Plugin()])
assert result.ret == 0
assert before is not None
assert after is not None
assert any("I_SHALL_BE_REMOVED" in entry for entry in before)
assert not any("I_SHALL_BE_REMOVED" in entry for entry in after)
|
12080
|
from cytoolz.functoolz import (
curry,
)
from eth_utils import (
to_dict,
to_tuple,
)
@curry
@to_dict
def normalize_dict(value, normalizers):
for key, item in value.items():
normalizer = normalizers[key]
yield key, normalizer(item)
@curry
@to_tuple
def normalize_array(value, normalizer):
"""
    This is just `map` but it's nice to have it return a consistent type
(tuple).
"""
for item in value:
yield normalizer(item)
@curry
def normalize_if(value, conditional_fn, normalizer):
if conditional_fn(value):
return normalizer(value)
else:
return value
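# mini usage sketch (added for illustration; hex_to_int is an assumed helper, not part
# of this module) -- the normalizers are curried, so they can be partially applied
# with their normalizer(s) and reused as plain callables.
if __name__ == "__main__":
    hex_to_int = lambda value: int(value, 16)
    normalize_account = normalize_dict(normalizers={"balance": hex_to_int, "nonce": hex_to_int})
    print(normalize_account({"balance": "0x10", "nonce": "0x1"}))   # {'balance': 16, 'nonce': 1}
    print(normalize_array(normalizer=hex_to_int)(["0x1", "0x2"]))   # (1, 2)
    print(normalize_if("42", str.isdigit, int))                     # 42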
|
12090
|
import os
import shutil
import unittest
from base64 import b64encode
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempFile
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from toil.job import Job
from toil.common import Toil
from cactus.shared.common import cactus_call, ChildTreeJob
class TestCase(unittest.TestCase):
def setUp(self):
self.testNo = TestStatus.getTestSetup(1, 5, 10, 100)
self.tempDir = getTempDirectory(os.getcwd())
self.tempFiles = []
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
system("rm -rf %s" % self.tempDir)
@TestStatus.shortLength
def testCactusCall(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open("/dev/urandom", "rb") as randText:
with open(inputFile, 'w') as fh:
fh.write(b64encode(randText.read(1024)).decode())
with open(inputFile) as fh:
input = "".join(fh.read().split("\n"))
#Send input to container's stdin through a file, get output
#from stdout
output = "".join(cactus_call(infile=inputFile, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
#Send input as string, get output from stdout
output = "".join(cactus_call(stdin_string=input, check_output=True,
parameters=["docker_test_script"]).split("\n"))
self.assertEqual(input, output)
@TestStatus.shortLength
def testCactusCallPipes(self):
inputFile = getTempFile(rootDir=self.tempDir)
with open(inputFile, 'w') as f:
f.write('foobar\n')
# using 'cat' here rather than infile is intentional; it tests
# whether the directory is mounted into containers correctly.
output = cactus_call(parameters=[['cat', inputFile],
['sed', 's/foo/baz/g'],
['awk', '{ print "quux" $0 }']],
check_output=True)
self.assertEqual(output, 'quuxbazbar\n')
@TestStatus.mediumLength
def testChildTreeJob(self):
"""Check that the ChildTreeJob class runs all children."""
numChildren = 100
flagDir = getTempDirectory()
options = Job.Runner.getDefaultOptions(getTempDirectory())
shutil.rmtree(options.jobStore)
with Toil(options) as toil:
toil.start(CTTestParent(flagDir, numChildren))
# Check that all jobs ran
for i in range(numChildren):
self.assertTrue(os.path.exists(os.path.join(flagDir, str(i))))
shutil.rmtree(flagDir)
class CTTestParent(ChildTreeJob):
def __init__(self, flagDir, numChildren):
self.flagDir = flagDir
self.numChildren = numChildren
super(CTTestParent, self).__init__()
def run(self, fileStore):
for i in range(self.numChildren):
self.addChild(CTTestChild(self.flagDir, i))
class CTTestChild(Job):
def __init__(self, flagDir, index):
self.flagDir = flagDir
self.index = index
super(CTTestChild, self).__init__()
def run(self, fileStore):
# Mark that this job has run using a flag file
path = os.path.join(self.flagDir, str(self.index))
with open(path, 'w') as f:
# Empty file
f.write('')
if __name__ == '__main__':
unittest.main()
|
12096
|
import json
class TrainingSpecification:
template = """
{
"TrainingSpecification": {
"TrainingImage": "IMAGE_REPLACE_ME",
"SupportedHyperParameters": [
{
"Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes",
"Name": "max_leaf_nodes",
"Type": "Integer",
"Range": {
"IntegerParameterRangeSpecification": {
"MinValue": "1",
"MaxValue": "100000"
}
},
"IsTunable": true,
"IsRequired": false,
"DefaultValue": "100"
}
],
"SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME,
"SupportsDistributedTraining": false,
"MetricDefinitions": METRICS_REPLACE_ME,
"TrainingChannels": CHANNELS_REPLACE_ME,
"SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME
}
}
"""
def get_training_specification_dict(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
return json.loads(
self.get_training_specification_json(
ecr_image,
supports_gpu,
supported_channels,
supported_metrics,
supported_tuning_job_objective_metrics,
)
)
def get_training_specification_json(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
if supported_channels is None:
print("Please provide at least one supported channel")
raise ValueError("Please provide at least one supported channel")
if supported_metrics is None:
supported_metrics = []
if supported_tuning_job_objective_metrics is None:
supported_tuning_job_objective_metrics = []
return (
self.template.replace("IMAGE_REPLACE_ME", ecr_image)
.replace("INSTANCES_REPLACE_ME", self.get_supported_instances(supports_gpu))
.replace(
"CHANNELS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True),
)
.replace(
"METRICS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True),
)
.replace(
"TUNING_OBJECTIVES_REPLACE_ME",
json.dumps(
[ob.__dict__ for ob in supported_tuning_job_objective_metrics],
indent=4,
sort_keys=True,
),
)
)
@staticmethod
def get_supported_instances(supports_gpu):
cpu_list = [
"ml.m4.xlarge",
"ml.m4.2xlarge",
"ml.m4.4xlarge",
"ml.m4.10xlarge",
"ml.m4.16xlarge",
"ml.m5.large",
"ml.m5.xlarge",
"ml.m5.2xlarge",
"ml.m5.4xlarge",
"ml.m5.12xlarge",
"ml.m5.24xlarge",
"ml.c4.xlarge",
"ml.c4.2xlarge",
"ml.c4.4xlarge",
"ml.c4.8xlarge",
"ml.c5.xlarge",
"ml.c5.2xlarge",
"ml.c5.4xlarge",
"ml.c5.9xlarge",
"ml.c5.18xlarge",
]
gpu_list = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.2xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
list_to_return = cpu_list
if supports_gpu:
list_to_return = cpu_list + gpu_list
return json.dumps(list_to_return)
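# mini usage sketch (added for illustration): the channel/metric/objective arguments only
# need to expose their fields through __dict__, so a plain object works here; DemoChannel
# and the ECR image URI below are assumptions, not real resources.
if __name__ == "__main__":
    class DemoChannel:
        def __init__(self, name):
            self.Name = name
            self.SupportedContentTypes = ["text/csv"]
            self.SupportedInputModes = ["File"]
    spec = TrainingSpecification().get_training_specification_dict(
        ecr_image="123456789012.dkr.ecr.us-east-1.amazonaws.com/demo-algo:latest",
        supports_gpu=False,
        supported_channels=[DemoChannel("training")],
    )
    print(json.dumps(spec, indent=2))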
|
12108
|
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.encoding import smart_unicode, iri_to_uri
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from rosetta.conf import settings as rosetta_settings
from rosetta.polib import pofile
from rosetta.poutil import find_pos, pagination_range
from rosetta.signals import entry_changed, post_save
import re
import rosetta
import datetime
import unicodedata
import hashlib
import os
def home(request):
"""
Displays a list of messages to be translated
"""
def fix_nls(in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
        newlines at the beginning and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
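    # behaviour sketch of fix_nls (added for illustration):
    #   fix_nls("\nfoo\n", "bar")  -> "\nbar\n"  (missing edge newlines restored)
    #   fix_nls("foo", "\nbar\n")  -> "bar"      (spurious edge newlines stripped)
    #   fix_nls("foo", "bar\r")    -> "bar"      (carriage returns removed)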
version = rosetta.get_version(True)
if 'rosetta_i18n_fn' in request.session:
rosetta_i18n_fn = request.session.get('rosetta_i18n_fn')
rosetta_i18n_app = get_app_name(rosetta_i18n_fn)
rosetta_i18n_lang_code = request.session['rosetta_i18n_lang_code']
rosetta_i18n_lang_bidi = rosetta_i18n_lang_code.split('-')[0] in settings.LANGUAGES_BIDI
rosetta_i18n_write = request.session.get('rosetta_i18n_write', True)
if rosetta_i18n_write:
rosetta_i18n_pofile = pofile(rosetta_i18n_fn)
for entry in rosetta_i18n_pofile:
entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest()
else:
rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile')
if 'filter' in request.GET:
if request.GET.get('filter') in ('untranslated', 'translated', 'fuzzy', 'all'):
filter_ = request.GET.get('filter')
request.session['rosetta_i18n_filter'] = filter_
return HttpResponseRedirect(reverse('rosetta-home'))
rosetta_i18n_filter = request.session.get('rosetta_i18n_filter', 'all')
if '_next' in request.POST:
rx = re.compile(r'^m_([0-9a-f]+)')
rx_plural = re.compile(r'^m_([0-9a-f]+)_([0-9]+)')
file_change = False
for key, value in request.POST.items():
md5hash = None
plural_id = None
if rx_plural.match(key):
md5hash = str(rx_plural.match(key).groups()[0])
# polib parses .po files into unicode strings, but
# doesn't bother to convert plural indexes to int,
# so we need unicode here.
plural_id = unicode(rx_plural.match(key).groups()[1])
elif rx.match(key):
md5hash = str(rx.match(key).groups()[0])
if md5hash is not None:
entry = rosetta_i18n_pofile.find(md5hash, 'md5hash')
# If someone did a makemessage, some entries might
# have been removed, so we need to check.
if entry:
old_msgstr = entry.msgstr
if plural_id is not None:
plural_string = fix_nls(entry.msgstr_plural[plural_id], value)
entry.msgstr_plural[plural_id] = plural_string
else:
entry.msgstr = fix_nls(entry.msgid, value)
is_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))
old_fuzzy = 'fuzzy' in entry.flags
if old_fuzzy and not is_fuzzy:
entry.flags.remove('fuzzy')
elif not old_fuzzy and is_fuzzy:
entry.flags.append('fuzzy')
file_change = True
if old_msgstr != value or old_fuzzy != is_fuzzy:
entry_changed.send(sender=entry,
user=request.user,
old_msgstr=old_msgstr,
old_fuzzy=old_fuzzy,
pofile=rosetta_i18n_fn,
language_code=rosetta_i18n_lang_code,
)
else:
request.session['rosetta_last_save_error'] = True
if file_change and rosetta_i18n_write:
try:
# Provide defaults in case authorization is not required.
request.user.first_name = getattr(request.user, 'first_name', 'Anonymous')
request.user.last_name = getattr(request.user, 'last_name', 'User')
request.user.email = getattr(request.user, 'email', '<EMAIL>')
                    rosetta_i18n_pofile.metadata['Last-Translator'] = unicodedata.normalize('NFKD', u"%s %s <%s>" % (request.user.first_name, request.user.last_name, request.user.email)).encode('ascii', 'ignore')
rosetta_i18n_pofile.metadata['X-Translated-Using'] = u"django-rosetta %s" % rosetta.get_version(False)
rosetta_i18n_pofile.metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z')
except UnicodeDecodeError:
pass
try:
rosetta_i18n_pofile.save()
rosetta_i18n_pofile.save_as_mofile(rosetta_i18n_fn.replace('.po', '.mo'))
post_save.send(sender=None, language_code=rosetta_i18n_lang_code, request=request)
# Try auto-reloading via the WSGI daemon mode reload mechanism
if rosetta_settings.WSGI_AUTO_RELOAD and \
'mod_wsgi.process_group' in request.environ and \
request.environ.get('mod_wsgi.process_group', None) and \
'SCRIPT_FILENAME' in request.environ and \
int(request.environ.get('mod_wsgi.script_reloading', '0')):
try:
os.utime(request.environ.get('SCRIPT_FILENAME'), None)
except OSError:
pass
# Try auto-reloading via uwsgi daemon reload mechanism
if rosetta_settings.UWSGI_AUTO_RELOAD:
try:
import uwsgi
# pretty easy right?
uwsgi.reload()
except:
# we may not be running under uwsgi :P
pass
except:
request.session['rosetta_i18n_write'] = False
request.session['rosetta_i18n_pofile'] = rosetta_i18n_pofile
# Retain query arguments
query_arg = ''
if 'query' in request.REQUEST:
query_arg = '?query=%s' % request.REQUEST.get('query')
if 'page' in request.GET:
if query_arg:
query_arg = query_arg + '&'
else:
query_arg = '?'
query_arg = query_arg + 'page=%d' % int(request.GET.get('page'))
return HttpResponseRedirect(reverse('rosetta-home') + iri_to_uri(query_arg))
rosetta_i18n_lang_name = _(request.session.get('rosetta_i18n_lang_name'))
rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code')
if 'query' in request.REQUEST and request.REQUEST.get('query', '').strip():
query = request.REQUEST.get('query').strip()
rx = re.compile(re.escape(query), re.IGNORECASE)
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete and rx.search(smart_unicode(e.msgstr) + smart_unicode(e.msgid) + u''.join([o[0] for o in e.occurrences]))], rosetta_settings.MESSAGES_PER_PAGE)
else:
if rosetta_i18n_filter == 'untranslated':
paginator = Paginator(rosetta_i18n_pofile.untranslated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'translated':
paginator = Paginator(rosetta_i18n_pofile.translated_entries(), rosetta_settings.MESSAGES_PER_PAGE)
elif rosetta_i18n_filter == 'fuzzy':
paginator = Paginator([e for e in rosetta_i18n_pofile.fuzzy_entries() if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
else:
paginator = Paginator([e for e in rosetta_i18n_pofile if not e.obsolete], rosetta_settings.MESSAGES_PER_PAGE)
if 'page' in request.GET and int(request.GET.get('page')) <= paginator.num_pages and int(request.GET.get('page')) > 0:
page = int(request.GET.get('page'))
else:
page = 1
messages = paginator.page(page).object_list
if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code:
main_language = None
for language in settings.LANGUAGES:
if language[0] == rosetta_settings.MAIN_LANGUAGE:
main_language = _(language[1])
break
fl = ("/%s/" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split("/%s/" % rosetta_i18n_lang_code))
po = pofile(fl)
main_messages = []
for message in messages:
message.main_lang = po.find(message.msgid).msgstr
needs_pagination = paginator.num_pages > 1
if needs_pagination:
if paginator.num_pages >= 10:
page_range = pagination_range(1, paginator.num_pages, page)
else:
page_range = range(1, 1 + paginator.num_pages)
ADMIN_MEDIA_PREFIX = settings.STATIC_URL
ENABLE_TRANSLATION_SUGGESTIONS = rosetta_settings.BING_APP_ID and rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS
BING_APP_ID = rosetta_settings.BING_APP_ID
MESSAGES_SOURCE_LANGUAGE_NAME = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME
MESSAGES_SOURCE_LANGUAGE_CODE = rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE
if 'rosetta_last_save_error' in request.session:
del(request.session['rosetta_last_save_error'])
rosetta_last_save_error = True
return render_to_response('rosetta/pofile.html', locals(), context_instance=RequestContext(request))
else:
return list_languages(request)
home = never_cache(home)
home = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(home)
def download_file(request):
import zipfile
from StringIO import StringIO
# original filename
rosetta_i18n_fn = request.session.get('rosetta_i18n_fn', None)
# in-session modified catalog
rosetta_i18n_pofile = request.session.get('rosetta_i18n_pofile', None)
# language code
rosetta_i18n_lang_code = request.session.get('rosetta_i18n_lang_code', None)
if not rosetta_i18n_lang_code or not rosetta_i18n_pofile or not rosetta_i18n_fn:
return HttpResponseRedirect(reverse('rosetta-home'))
try:
if len(rosetta_i18n_fn.split('/')) >= 5:
offered_fn = '_'.join(rosetta_i18n_fn.split('/')[-5:])
else:
offered_fn = rosetta_i18n_fn.split('/')[-1]
po_fn = str(rosetta_i18n_fn.split('/')[-1])
mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh
zipdata = StringIO()
zipf = zipfile.ZipFile(zipdata, mode="w")
zipf.writestr(po_fn, unicode(rosetta_i18n_pofile).encode("utf8"))
zipf.writestr(mo_fn, rosetta_i18n_pofile.to_binary())
zipf.close()
zipdata.seek(0)
response = HttpResponse(zipdata.read())
response['Content-Disposition'] = 'attachment; filename=%s.%s.zip' % (offered_fn, rosetta_i18n_lang_code)
response['Content-Type'] = 'application/x-zip'
return response
except Exception:
return HttpResponseRedirect(reverse('rosetta-home'))
download_file = never_cache(download_file)
download_file = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(download_file)
def list_languages(request):
"""
Lists the languages for the current project, the gettext catalog files
that can be translated and their translation progress
"""
languages = []
if 'filter' in request.GET:
if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):
filter_ = request.GET.get('filter')
request.session['rosetta_i18n_catalog_filter'] = filter_
return HttpResponseRedirect(reverse('rosetta-pick-file'))
rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
has_pos = False
for language in settings.LANGUAGES:
pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)
has_pos = has_pos or len(pos)
languages.append(
(language[0],
_(language[1]),
[(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos],
)
)
ADMIN_MEDIA_PREFIX = settings.STATIC_URL
version = rosetta.get_version(True)
return render_to_response('rosetta/languages.html', locals(), context_instance=RequestContext(request))
list_languages = never_cache(list_languages)
list_languages = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(list_languages)
def get_app_name(path):
app = path.split("/locale")[0].split("/")[-1]
return app
def lang_sel(request, langid, idx):
"""
Selects a file to be translated
"""
if langid not in [l[0] for l in settings.LANGUAGES]:
raise Http404
else:
rosetta_i18n_catalog_filter = request.session.get('rosetta_i18n_catalog_filter', 'project')
third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')
django_apps = rosetta_i18n_catalog_filter in ('all', 'django')
project_apps = rosetta_i18n_catalog_filter in ('all', 'project')
file_ = find_pos(langid, project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)[int(idx)]
request.session['rosetta_i18n_lang_code'] = langid
request.session['rosetta_i18n_lang_name'] = unicode([l[1] for l in settings.LANGUAGES if l[0] == langid][0])
request.session['rosetta_i18n_fn'] = file_
po = pofile(file_)
for entry in po:
entry.md5hash = hashlib.md5(entry.msgid.encode("utf8") + entry.msgstr.encode("utf8")).hexdigest()
request.session['rosetta_i18n_pofile'] = po
try:
os.utime(file_, None)
request.session['rosetta_i18n_write'] = True
except OSError:
request.session['rosetta_i18n_write'] = False
return HttpResponseRedirect(reverse('rosetta-home'))
lang_sel = never_cache(lang_sel)
lang_sel = user_passes_test(lambda user: can_translate(user), settings.LOGIN_URL)(lang_sel)
def can_translate(user):
if not getattr(settings, 'ROSETTA_REQUIRES_AUTH', True):
return True
if not user.is_authenticated():
return False
elif user.is_superuser and user.is_staff:
return True
else:
try:
from django.contrib.auth.models import Group
translators = Group.objects.get(name='translators')
return translators in user.groups.all()
except Group.DoesNotExist:
return False
|
12124
|
from typing import TypedDict
from .utils.Classes.String import String
from .utils.assert_string import assert_string
from .utils.merge import merge
class _IsStrongPasswordOptions(TypedDict):
min_length: int
min_uppercase: int
min_lowercase: int
min_numbers: int
min_symbols: int
return_score: bool
points_per_unique: int
points_per_repeat: float
points_for_containing_upper: int
points_for_containing_lower: int
points_for_containing_number: int
points_for_containing_symbol: int
class _Analysis(TypedDict):
length: int
unique_chars: int
uppercase_count: int
lowercase_count: int
number_count: int
symbol_count: int
default_options: _IsStrongPasswordOptions = {
"min_length": 8,
"min_uppercase": 1,
"min_lowercase": 1,
"min_numbers": 1,
"min_symbols": 1,
"return_score": False,
"points_per_unique": 1,
"points_per_repeat": 0.5,
"points_for_containing_lower": 10,
"points_for_containing_upper": 10,
"points_for_containing_number": 10,
"points_for_containing_symbol": 10,
}
def count_chars(pw: String):
result = {}
for char in pw:
if char in result:
            result[char] += 1
else:
result[char] = 1
return result
def analyze_password(pw: String) -> _Analysis:
upper_case_regex = r"^[A-Z]$"
lower_case_regex = r"^[a-z]$"
number_regex = r"^[0-9]$"
symbol_regex = r"^[-#!$@%^&*()_+|~=`{}\[\]:\";'<>?,./ ]$"
char_map = count_chars(pw)
analysis: _Analysis = {
"length": pw.length,
"unique_chars": len([*char_map]),
"uppercase_count": 0,
"lowercase_count": 0,
"number_count": 0,
"symbol_count": 0,
}
for char in [*char_map]:
char = String(char)
if char.match(upper_case_regex):
analysis["uppercase_count"] += char_map[char]
elif char.match(lower_case_regex):
analysis["lowercase_count"] += char_map[char]
elif char.match(number_regex):
analysis["number_count"] += char_map[char]
elif char.match(symbol_regex):
analysis["symbol_count"] += char_map[char]
return analysis
def score_password(analysis: _Analysis, options: _IsStrongPasswordOptions):
points = 0
points += analysis["unique_chars"] * options["points_per_unique"]
    points += (analysis["length"] - analysis["unique_chars"]) * options["points_per_repeat"]
if analysis["uppercase_count"] > 0:
points += options["points_for_containing_upper"]
if analysis["lowercase_count"] > 0:
points += options["points_for_containing_lower"]
if analysis["number_count"] > 0:
points += options["points_for_containing_number"]
if analysis["symbol_count"] > 0:
points += options["points_for_containing_symbol"]
return points
def is_strong_password(input: str, options: _IsStrongPasswordOptions = None) -> bool:
    input = assert_string(input)
    options = merge(options or {}, default_options)
analysis = analyze_password(input)
if options["return_score"]:
return score_password(analysis, options)
return (
analysis["length"] >= options["min_length"] and
analysis["uppercase_count"] >= options["min_uppercase"] and
analysis["lowercase_count"] >= options["min_lowercase"] and
analysis["number_count"] >= options["min_numbers"] and
analysis["symbol_count"] >= options["min_symbols"]
)
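# Hedged usage sketch (illustrative only, not part of the validator). It assumes the
# package's relative imports above resolve and that merge() fills in unspecified
# defaults; the passwords are invented for the demonstration.
if __name__ == "__main__":  # pragma: no cover
    print(is_strong_password("abc"))                                      # too short -> False
    print(is_strong_password("Str0ng!Passw0rd"))                          # meets all minimums -> True
    print(is_strong_password("Str0ng!Passw0rd", {"return_score": True}))  # numeric score instead of a bool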
|
12130
|
import pytest
@pytest.fixture(scope="session")
def test_data():
from pathlib import Path
module_dir = Path(__file__).resolve().parent
test_dir = module_dir / "test_data"
return test_dir.resolve()
@pytest.fixture(scope="session")
def database():
return "jobflow_test"
@pytest.fixture(scope="session")
def mongo_jobstore(database):
from maggma.stores import MongoStore
from jobflow import JobStore
store = JobStore(MongoStore(database, "outputs"))
store.connect()
return store
@pytest.fixture(scope="function")
def memory_jobstore():
from maggma.stores import MemoryStore
from jobflow import JobStore
store = JobStore(MemoryStore())
store.connect()
return store
@pytest.fixture(scope="function")
def memory_data_jobstore():
from maggma.stores import MemoryStore
from jobflow import JobStore
store = JobStore(MemoryStore(), additional_stores={"data": MemoryStore()})
store.connect()
return store
@pytest.fixture
def clean_dir():
import os
import shutil
import tempfile
old_cwd = os.getcwd()
newpath = tempfile.mkdtemp()
os.chdir(newpath)
yield
os.chdir(old_cwd)
shutil.rmtree(newpath)
@pytest.fixture(scope="session")
def debug_mode():
return False
@pytest.fixture(scope="session")
def lpad(database, debug_mode):
from fireworks import LaunchPad
lpad = LaunchPad(name=database)
lpad.reset("", require_password=False)
yield lpad
if not debug_mode:
lpad.reset("", require_password=False)
for coll in lpad.db.list_collection_names():
lpad.db[coll].drop()
@pytest.fixture
def no_pydot(monkeypatch):
import builtins
import_orig = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "pydot":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
@pytest.fixture
def no_matplotlib(monkeypatch):
import builtins
import_orig = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "matplotlib":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
|
12147
|
from __future__ import unicode_literals
from __future__ import print_function
import moya
from moya.compat import text_type
from requests_oauthlib import OAuth1Session
def get_credentials(provider, credentials):
client_id = credentials.client_id or provider.get('client_id', None)
client_secret = credentials.client_secret or provider.get('client_secret', None)
return client_id, client_secret
@moya.expose.macro('get_oauth_resource_owner')
def get_oauth_resource_owner(app, provider, credentials):
client_id, client_secret = get_credentials(provider, credentials)
oauth = OAuth1Session(client_id, client_secret=client_secret)
request_token_url = provider['request_token_url']
response = oauth.fetch_request_token(request_token_url)
resource_owner_key = response.get('oauth_token')
resource_owner_secret = response.get('oauth_token_secret')
result = {
"key": resource_owner_key,
"secret": resource_owner_secret
}
return result
@moya.expose.macro('get_oauth_authorize_url')
def get_oauth_authorize_url(app, provider, credentials):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
oauth = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret)
authorization_url = oauth.authorization_url(provider['authorization_base_url'])
return authorization_url
@moya.expose.macro('get_oauth_access_token')
def get_oauth_access_token(app, provider, credentials, verifier):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
oauth = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
access_token_url = provider['access_token_url']
oauth_tokens = oauth.fetch_access_token(access_token_url)
return oauth_tokens
@moya.expose.macro('get_oauth_profile')
def get_oauth_profile(app, provider, credentials, verifier):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
resources = provider.get('resources', {})
session = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
access_token_url = provider['access_token_url']
try:
oauth_tokens = session.fetch_access_token(access_token_url)
except Exception as e:
app.throw('moya.logins.access-fail',
text_type(e))
info = {}
for scope, scope_url in sorted(resources.items()):
try:
response = session.get(scope_url)
except Exception as e:
app.throw('moya.logins.get-scope-fail',
text_type(e),
diagnosis="There may be a connectivity issue getting scope information.",
scope=scope,
scope_url=scope_url)
try:
info[scope] = scope_data = response.json()
#if(context['.debug']):
# context['.console'].obj(context, scope_data)
except:
pass
provider_profile = provider.get('profile', {})
profile = {}
context['_oauth_info'] = info
with context.frame('_oauth_info'):
for k, v in provider_profile.items():
try:
profile[k] = context.eval(v)
except:
pass
return {'profile': profile, 'info': info}
|
12170
|
import asyncio
import logging
import traceback
import uuid
from typing import Optional, Tuple, Any, Callable
from pesto.ws.core.payload_parser import PayloadParser, PestoConfig
from pesto.ws.core.pesto_feature import PestoFeatures
from pesto.ws.core.utils import load_class, async_exec
from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper
from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI
from pesto.ws.features.payload_converter import PayloadConverter
from pesto.ws.features.payload_debug import PayloadDebug
from pesto.ws.features.response_serializer import ResponseSerializer
from pesto.ws.features.schema_validation import SchemaValidation
from pesto.ws.features.stateful_response import StatefulResponse
from pesto.ws.features.stateless_response import StatelessResponse
from pesto.ws.service.describe import DescribeService
from pesto.ws.service.job_result import ResultType
log = logging.getLogger(__name__)
class ProcessService:
PROCESS_CLASS_NAME = 'algorithm.process.Process'
_algorithm: Optional[Callable] = None
_describe = None
@staticmethod
def init():
if ProcessService._algorithm is not None:
raise ValueError('Process Service already loaded !')
try:
log.info('ProcessService.init() ...')
ProcessService._algorithm = load_class(ProcessService.PROCESS_CLASS_NAME)()
if hasattr(ProcessService._algorithm, 'on_start'):
log.info('ProcessService.on_start() ...')
ProcessService._algorithm.on_start()
log.info('ProcessService.on_start() ... Done !')
log.info('ProcessService.init() ... Done !')
except:
traceback.print_exc()
log.warning('Algorithm {}.on_start() failure !'.format(ProcessService.PROCESS_CLASS_NAME))
def __init__(self, url_root: str):
self.url_root = url_root
@property
def service_description(self):
if ProcessService._describe is None:
ProcessService._describe = DescribeService(self.url_root).compute_describe()
return ProcessService._describe
def process(self, payload: dict) -> dict:
config = PayloadParser.parse(payload)
image_roi: Optional[ImageROI] = config.get(PestoConfig.roi) # if no ROI: None
active_roi: ImageROI = image_roi or DummyImageROI() # bypass compute crop info and remove margins in pipeline
job_id = str(uuid.uuid4().time_low)
is_stateful = self.service_description['asynchronous'] is True
input_schema = self.service_description['input']
output_schema = self.service_description['output']
common_pipeline = filter(None, [
SchemaValidation(schema=input_schema),
active_roi.compute_crop_infos(),
PayloadConverter(image_roi=image_roi, schema=input_schema),
PayloadDebug(schema=input_schema),
AlgorithmWrapper(ProcessService._algorithm),
active_roi.remove_margin(),
ResponseSerializer(schema=output_schema, job_id=job_id),
])
if is_stateful:
pipeline = [
*common_pipeline,
StatefulResponse(self.url_root, job_id)
]
else:
pipeline = [
*common_pipeline,
StatelessResponse(self.url_root, job_id, output_schema)
]
return PestoFeatures(pipeline).process(payload)
async def async_process(self, request_payload: dict) -> Tuple[Any, ResultType]:
return await asyncio.wait_for(
async_exec(lambda: self.process(request_payload)),
timeout=None
)
|
12172
|
import configparser
import os
import typing
from sitri.providers.base import ConfigProvider
class IniConfigProvider(ConfigProvider):
"""Config provider for Initialization file (Ini)."""
provider_code = "ini"
def __init__(
self,
ini_path: str = "./config.ini",
):
"""
:param ini_path: path to ini file
"""
self.configparser = configparser.ConfigParser()
with open(os.path.abspath(ini_path)) as f:
self.configparser.read_file(f)
self._sections = None
@property
def sections(self):
if not self._sections:
self._sections = list(self.configparser.keys())
return self._sections
def get(self, key: str, section: str, **kwargs) -> typing.Optional[typing.Any]: # type: ignore
"""Get value from ini file.
:param key: key or path for search
:param section: section of ini file
"""
if section not in self.sections:
return None
return self.configparser[section].get(key)
def keys(self, section: str, **kwargs) -> typing.List[str]: # type: ignore
"""Get keys of section.
:param section: section of ini file
"""
if section not in self.sections:
return []
return list(self.configparser[section].keys())
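# Hedged usage sketch (illustrative only). The section and keys below are invented,
# and it assumes the ConfigProvider base class needs no extra setup to instantiate.
if __name__ == "__main__":  # pragma: no cover
    import tempfile
    with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as tmp:
        tmp.write("[database]\nhost = localhost\nport = 5432\n")
    provider = IniConfigProvider(ini_path=tmp.name)
    print(provider.get("host", section="database"))   # -> "localhost"
    print(provider.keys(section="database"))          # -> ["host", "port"]
    print(provider.get("host", section="missing"))    # unknown section -> None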
|
12175
|
from kinetics.reaction_classes.reaction_base_class import Reaction
class Generic(Reaction):
"""
This Reaction class allows you to specify your own rate equation.
Enter the parameter names in params, and the substrate names used in the reaction in species.
Type the rate equation as a string in rate_equation, using these same names.
Enter the substrates used up, and the products made in the reaction as normal.
"""
def __init__(self,
params=[], species=[],
rate_equation='',
substrates=[], products=[]):
super().__init__()
self.reaction_substrate_names = species
self.parameter_names=params
self.rate_equation = rate_equation
self.substrates = substrates
self.products = products
    def calculate_rate(self, substrates, parameters):
        # Build an explicit namespace mapping each species and parameter name to its
        # current value, then evaluate the user-supplied rate expression against it.
        namespace = {name: substrates[i] for i, name in enumerate(self.reaction_substrate_names)}
        namespace.update({name: parameters[i] for i, name in enumerate(self.parameter_names)})
        rate = eval(self.rate_equation, globals(), namespace)
return rate
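# Hedged usage sketch: a made-up Michaelis-Menten style rate law expressed through this
# Generic class. The names (vmax, km, S) and numbers are illustrative, and the sketch
# assumes the Reaction base class needs no further setup before calling calculate_rate().
if __name__ == "__main__":  # pragma: no cover
    mm = Generic(params=["vmax", "km"],
                 species=["S"],
                 rate_equation="(vmax * S) / (km + S)",
                 substrates=["S"], products=["P"])
    # substrates and parameters are passed in the same order as species and params
    print(mm.calculate_rate(substrates=[10.0], parameters=[1.0, 5.0]))  # -> ~0.667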
|
12180
|
import json
import requests
from .exceptions import (
RequestsError,
RequestsTimeoutError,
RPCError
)
_default_endpoint = 'http://localhost:9500'
_default_timeout = 30
def base_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> str:
"""
Basic RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
str
Raw output from the request
Raises
------
TypeError
If params is not a list or None
RequestsTimeoutError
If request timed out
RequestsError
        If other request error occurred
"""
if params is None:
params = []
elif not isinstance(params, list):
raise TypeError(f'invalid type {params.__class__}')
try:
payload = {
"id": "1",
"jsonrpc": "2.0",
"method": method,
"params": params
}
headers = {
'Content-Type': 'application/json'
}
resp = requests.request('POST', endpoint, headers=headers, data=json.dumps(payload),
timeout=timeout, allow_redirects=True)
return resp.content
except requests.exceptions.Timeout as err:
raise RequestsTimeoutError(endpoint) from err
except requests.exceptions.RequestException as err:
raise RequestsError(endpoint) from err
def rpc_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict
Returns dictionary representation of RPC response
Example format:
{
"jsonrpc": "2.0",
"id": 1,
"result": ...
}
Raises
------
RPCError
If RPC response returned a blockchain error
See Also
--------
base_request
"""
raw_resp = base_request(method, params, endpoint, timeout)
try:
resp = json.loads(raw_resp)
if 'error' in resp:
raise RPCError(method, endpoint, str(resp['error']))
return resp
except json.decoder.JSONDecodeError as err:
raise RPCError(method, endpoint, raw_resp) from err
# TODO: Add GET requests
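# Hedged usage sketch (illustrative only): "example_method" is a placeholder; substitute
# the RPC method and node URL you actually target.
if __name__ == "__main__":  # pragma: no cover
    try:
        reply = rpc_request("example_method", params=[], endpoint=_default_endpoint, timeout=5)
        print(reply.get("result"))
    except RPCError as err:
        print("node returned an error payload:", err)
    except (RequestsTimeoutError, RequestsError) as err:
        print("transport-level failure:", err)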
|
12184
|
from __future__ import print_function
import argparse, sys
from .utils import is_textfile
def contains_crlf(filename):
with open(filename, mode='rb') as file_checked:
for line in file_checked.readlines():
if line.endswith(b'\r\n'):
return True
return False
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='filenames to check')
args = parser.parse_args(argv)
text_files = [f for f in args.filenames if is_textfile(f)]
files_with_crlf = [f for f in text_files if contains_crlf(f)]
return_code = 0
for file_with_crlf in files_with_crlf:
print('CRLF end-lines detected in file: {0}'.format(file_with_crlf))
return_code = 1
return return_code
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
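# Hedged usage note (illustrative, file names are placeholders): as a pre-commit style
# hook this module is run as
#   python -m <module_name> path/to/file_a.py path/to/file_b.txt
# and from code as
#   exit_code = main(["path/to/file_a.py"])  # prints offending files, returns 1 if any has CRLF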
|
12200
|
from KratosMultiphysics import ParallelEnvironment, IsDistributedRun
if IsDistributedRun():
from KratosMultiphysics.mpi import DataCommunicatorFactory
import KratosMultiphysics.KratosUnittest as UnitTest
import math
class TestDataCommunicatorFactory(UnitTest.TestCase):
def setUp(self):
self.registered_comms = []
self.default_data_communicator = ParallelEnvironment.GetDefaultDataCommunicator()
self.original_default = ParallelEnvironment.GetDefaultDataCommunicatorName()
def tearDown(self):
if len(self.registered_comms) > 0:
ParallelEnvironment.SetDefaultDataCommunicator(self.original_default)
for comm_name in self.registered_comms:
ParallelEnvironment.UnregisterDataCommunicator(comm_name)
def markForCleanUp(self,comm_name):
self.registered_comms.append(comm_name)
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorDuplication(self):
duplicate_comm = DataCommunicatorFactory.DuplicateAndRegister(self.default_data_communicator, "Duplicate")
self.markForCleanUp("Duplicate") # to clean up during tearDown
self.assertEqual(duplicate_comm.Rank(), self.default_data_communicator.Rank())
self.assertEqual(duplicate_comm.Size(), self.default_data_communicator.Size())
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorSplit(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
split_comm = DataCommunicatorFactory.SplitAndRegister(self.default_data_communicator, rank % 2, 0, "EvenOdd")
self.markForCleanUp("EvenOdd") # to clean up during tearDown
expected_rank = rank // 2
if rank % 2 == 0:
expected_size = math.ceil(size/2)
else:
expected_size = math.floor(size/2)
self.assertEqual(split_comm.Rank(), expected_rank)
self.assertEqual(split_comm.Size(), expected_size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 1, "Test requires at least two ranks.")
def testDataCommunicatorCreateFromRange(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
ranks = [i for i in range(1,size)]
range_comm = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, ranks, "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
if rank == 0:
self.assertTrue(range_comm.IsNullOnThisRank())
self.assertFalse(range_comm.IsDefinedOnThisRank())
else:
self.assertEqual(range_comm.Rank(), rank-1)
self.assertEqual(range_comm.Size(), size-1)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateUnion(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
# Create union communicator (should contain all ranks)
union_comm = DataCommunicatorFactory.CreateUnionAndRegister(all_except_first, all_except_last, self.default_data_communicator, "Union")
self.markForCleanUp("Union") # to clean up during tearDown
self.assertFalse(union_comm.IsNullOnThisRank())
self.assertEqual(union_comm.Rank(), rank)
self.assertEqual(union_comm.Size(), size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateIntersection(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
intersection_comm = DataCommunicatorFactory.CreateIntersectionAndRegister(
all_except_first, all_except_last, self.default_data_communicator, "Intersection")
self.markForCleanUp("Intersection") # to clean up during tearDown
if rank == 0 or rank == size - 1:
# The first and last ranks do not participate in the intersection communicator
self.assertTrue(intersection_comm.IsNullOnThisRank())
else:
self.assertEqual(intersection_comm.Rank(), rank - 1 )
self.assertEqual(intersection_comm.Size(), size - 2 )
if __name__ == "__main__":
UnitTest.main()
|
12201
|
from __future__ import print_function
try:
import vkaudiotoken
except ImportError:
import path_hack
from vkaudiotoken import supported_clients
import sys
import requests
import json
token = sys.argv[1]
user_agent = supported_clients.KATE.user_agent
sess = requests.session()
sess.headers.update({'User-Agent': user_agent})
def prettyprint(result):
print(json.dumps(json.loads(result.content.decode('utf-8')), indent=2))
prettyprint(sess.get(
"https://api.vk.com/method/audio.getById",
params=[('access_token', token),
('audios', '371745461_456289486,-41489995_202246189'),
('v', '5.95')]
))
|
12205
|
import pytest
from Thycotic import Client, \
secret_password_get_command, secret_username_get_command, \
secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \
secret_delete_command, folder_create_command, folder_delete_command, folder_update_command
from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \
SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \
SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT
from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \
SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \
SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \
FOLDER_UPDATE_RAW_RESPONSE
GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"}
GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"}
SECRET_GET_ARGS = {"secret_id": "4"}
SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "<PASSWORD>"}
SECRET_CHECKOUT_ARGS = {"secret_id": "4"}
SECRET_CHECKIN_ARGS = {"secret_id": "4"}
SECRET_DELETE_ARGS = {"id": "9"}
FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"}
FOLDER_DELETE_ARGS = {"folder_id": "9"}
FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}
@pytest.mark.parametrize('command, args, http_response, context', [
(secret_password_get_command, GET_PASSWORD_BY_ID_ARGS, GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT),
(secret_username_get_command, GET_USERNAME_BY_ID_ARGS, GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT),
(secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE, SECRET_GET_CONTENT),
(secret_password_update_command, SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,
SECRET_PASSWORD_UPDATE_CONTEXT),
(secret_checkout_command, SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE, SECRET_CHECKOUT_CONTEXT),
(secret_checkin_command, SECRET_CHECKIN_ARGS, SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT),
(secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE, SECRET_DELETE_CONTEXT),
(folder_create_command, FOLDER_CREATE_ARGS, FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT),
(folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE, FOLDER_DELETE_CONTEXT),
(folder_update_command, FOLDER_UPDATE_ARGS, FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)
])
def test_thycotic_commands(command, args, http_response, context, mocker):
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://thss.softwarium.net/SecretServer", username="xsoar1", password="<PASSWORD>",
proxy=False, verify=False)
mocker.patch.object(Client, '_http_request', return_value=http_response)
outputs = command(client, **args)
results = outputs.to_context()
assert results.get("EntryContext") == context
|
12215
|
import numpy as np
from radix import radixConvert
c = radixConvert()
a = np.load("../../data/5/layer4.npy")
print(a.shape)
a = a*128
a = np.around(a).astype(np.int16)
print(a)
a = np.load('../../data/6.npy')
a = a*128
a = np.around(a).astype(np.int8)
print(a.shape)
for i in range(84):
print(i)
print(a[i])
'''
a = a*128
print(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
if a[i][j] > 127:
a[i][j] = 127
a = np.around(a).astype(np.int8)
print(a)
print(a[4][17])
weight_file = open('f1_rom.coe', 'w')
weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n')
weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n')
for i in range(32):
for j in range(32):
if(i < 2 or i > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
elif(j < 2 or j > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
else:
weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n')
'''
|
12216
|
class Wrapper(object):
wrapper_classes = {}
@classmethod
def wrap(cls, obj):
return cls(obj)
def __init__(self, wrapped):
self.__dict__['wrapped'] = wrapped
def __getattr__(self, name):
return getattr(self.wrapped, name)
def __setattr__(self, name, value):
setattr(self.wrapped, name, value)
def __delattr__(self, name):
delattr(self.wrapped, name)
def __str__(self):
return str(self.wrapped)
def __repr__(self):
return repr(self.wrapped)
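# Hedged usage sketch: the Point class below is invented purely to show that reads,
# writes and deletions are all forwarded to the wrapped instance.
if __name__ == "__main__":  # pragma: no cover
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __str__(self):
            return "Point(%s, %s)" % (self.x, self.y)

    p = Wrapper.wrap(Point(1, 2))
    print(p.x)   # 1 -- attribute read goes through __getattr__
    p.y = 5      # attribute write goes through __setattr__ to the wrapped Point
    print(p)     # "Point(1, 5)" -- __str__ delegates as well
    del p.x      # __delattr__ forwards the deletion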
|
12221
|
if not __name__ == "__main__":
print("Started <Pycraft_StartupAnimation>")
class GenerateStartupScreen:
def __init__(self):
pass
def Start(self):
try:
self.Display.fill(self.BackgroundCol)
self.mod_Pygame__.display.flip()
self.mod_Pygame__.display.set_caption(f"Pycraft: v{self.version}: Welcome")
PresentsFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 35)
PycraftFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 60)
NameFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 45)
NameText = NameFont.render("<NAME>", True, self.FontCol)
NameTextWidth = NameText.get_width()
NameTextHeight = NameText.get_height()
PresentsText = PresentsFont.render("presents", True, self.FontCol)
PycraftText = PycraftFont.render("Pycraft", True, self.FontCol)
PycraftTextWidth = PycraftText.get_width()
PycraftTextHeight = PycraftText.get_height()
iteration = 0
clock = self.mod_Pygame__.time.Clock()
if self.RunFullStartup == True:
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*2):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
self.Display.blit(PresentsText, ((((self.realWidth-NameTextWidth)/2)+120), ((self.realHeight-NameTextHeight)/2)+30))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, (self.realHeight-PycraftTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
y = 0
while True:
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, ((self.realHeight-PycraftTextHeight)/2)-y))
y += 2
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
if ((self.realHeight-PycraftTextHeight)/2)-y <= 0:
self.RunFullStartup = False
return None
except Exception as Message:
self.RunFullStartup = False
return Message
else:
print("You need to run this as part of Pycraft")
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.withdraw()
messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file")
quit()
|
12277
|
from django.db.models.signals import post_init
from factory import DjangoModelFactory, Sequence, SubFactory
from factory.django import mute_signals
from affiliates.banners import models
class CategoryFactory(DjangoModelFactory):
FACTORY_FOR = models.Category
name = Sequence(lambda n: 'test{0}'.format(n))
class BannerFactory(DjangoModelFactory):
ABSTRACT_FACTORY = True
category = SubFactory(CategoryFactory)
name = Sequence(lambda n: 'test{0}'.format(n))
destination = 'https://mozilla.org/'
visible = True
class ImageBannerFactory(BannerFactory):
FACTORY_FOR = models.ImageBanner
@mute_signals(post_init)
class ImageVariationFactory(DjangoModelFactory):
ABSTRACT_FACTORY = True
color = 'Blue'
locale = 'en-us'
image = 'uploads/image_banners/test.png'
class ImageBannerVariationFactory(ImageVariationFactory):
FACTORY_FOR = models.ImageBannerVariation
banner = SubFactory(ImageBannerFactory)
class TextBannerFactory(BannerFactory):
FACTORY_FOR = models.TextBanner
class TextBannerVariationFactory(DjangoModelFactory):
FACTORY_FOR = models.TextBannerVariation
banner = SubFactory(TextBannerFactory)
locale = 'en-us'
text = Sequence(lambda n: 'test{0}'.format(n))
class FirefoxUpgradeBannerFactory(BannerFactory):
FACTORY_FOR = models.FirefoxUpgradeBanner
@mute_signals(post_init)
class FirefoxUpgradeBannerVariationFactory(ImageVariationFactory):
FACTORY_FOR = models.FirefoxUpgradeBannerVariation
banner = SubFactory(FirefoxUpgradeBannerFactory)
image = 'uploads/firefox_upgrade_banners/test.png'
upgrade_image = 'uploads/firefox_upgrade_banners/test_upgrade.png'
|
12297
|
from flask import Flask, jsonify, request, render_template, redirect
from flask_pymongo import PyMongo
from werkzeug import secure_filename
import base64
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'restdb'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
mongo = PyMongo(app)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/w')
def webcam():
return render_template("webcam.html")
@app.route('/img')
def img():
    i = request.query_string
    # Decode the base64-encoded query string and write it out as an image file
    with open('a.png', 'wb') as f:
        f.write(base64.b64decode(i))
    return "success <img src='" + i.decode('utf-8') + "'>"
@app.route('/hello')
def hello():
return "hello world"
@app.route('/star', methods=['GET'])
def get_all_stars():
star = mongo.db.stars
output = []
for s in star.find():
output.append({'name' : s['name'], 'distance' : s['distance']})
return jsonify(output)
@app.route('/star/<name>', methods=['GET'])
def get_one_star(name):
star = mongo.db.stars
s = star.find_one({'name' : name})
if s:
output = {'name': s['name'], 'distance': s['distance']}
else:
output = "No such name"
return jsonify(output)
@app.route('/star', methods=['POST'])
def add_star():
star = mongo.db.stars
name = request.json['name']
distance = request.json['distance']
star_id = star.insert({'name': name, 'distance': distance})
new_star = star.find_one({'_id': star_id})
output = {'name' : new_star['name'], 'distance' : new_star['distance']}
return jsonify(output)
@app.route('/uploader', methods=['POST'])
def upload_file():
f = request.files['file']
f.save(secure_filename('1'))
return "uploaded"
if __name__ == '__main__':
app.run(debug=True)
|
12315
|
from test.test_base import TestBase
class TestMath(TestBase):
def test_isclose(self):
_test = self._assert_execute
_test('ㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㄱㅇㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄴ', 'True')
_test('ㅈㅈㅈ ㄴㄱ ㅅㅎㄷ ㅅㅈㅈ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False')
_test('ㅈㅈㅈㅈㅈㅈㅈ ㄴㄱ ㅅㅎㄷ ㅅㅈㅈㅈㅈㅈㅈ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False')
_test('ㅅㄷㄱ ㅈ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'False')
_test('ㄴㅈㄱㄹㄴㄹㄱ ㅅㄴㅂㄱㄱㄴㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅈ ㅂㅎㄹ) (ㄱ ㅂ ㅅ ㅂ ㅂㅎㄹ ㅄㅎㄷ) ㅅㅎㄷ, ㄴㄱ, (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('ㄱㄴ ㄷ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄱㄴ ㄹ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄱㄴ ㅁ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄴㄱ ㄷ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄴㄱ ㄹ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
_test('ㄴㄱ ㅁ (ㄱㅇㄱ ㄴㅇㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㄴㅇㄱ ㅅㅎㄷ, ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ)ㅎㄷ', 'True')
def test_isnan(self):
_test = self._assert_execute
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄴ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㄴㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
def test_isinf(self):
_test = self._assert_execute
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄴ (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㅁ ㅂㅎㄹ) (ㅂ ㅅ ㅁ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㅁ ㅂㅎㄹ ㄴㄱ ㄱㅎㄷ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'True')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) ㄴ ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄱ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('ㄴ (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
_test('(ㅂ ㅅ ㄴ ㅂㅎㄹ) (ㅂ ㅅ ㄴ ㅂㅎㄹ) ㅄㅎㄷ (ㅂ ㅅ ㅁㄴ ㅂㅎㄹ)ㅎㄴ', 'False')
def test_abs(self):
_test = self._assert_execute
_test('ㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0')
_test('ㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.0')
_test('ㄱ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.0')
_test('ㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1')
_test('ㄴㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1')
_test('ㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '2')
_test('ㄷㄱ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㅁ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.25')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.25')
_test('ㄴ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄴㄱ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅄㅎㄴ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄱ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄱ ㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '1.0')
_test('ㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄱ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ', '0.5')
_test('ㄹ ㅁ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄹㄱ ㅁ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄹ ㅁㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄹㄱ ㅁㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂ ㅁㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂㄱ ㅁㄴㄱ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂ ㅁㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅂㄱ ㅁㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㅂㄴㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄴㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄴㄱ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄴㄱ ㅁ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㅁㄱ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄱㄴ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
_test('ㄱㄷ ㅁ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅂ ㅅ ㄱ ㅂㅎㅀㄷ', 'True')
def test_log(self):
_test = self._assert_execute
_test('ㄴ [((ㅂ ㅅ ㅈ ㅂㅎㄹ) (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷㄱ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ ㄹ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ [(ㅂ ㅅ ㅈ ㅂㅎㄹ (ㄱㅇㄱ ㅂ ㅅ ㄺ ㅂㅎㅀㄴ) ㅅㅎㄷ) ㄱㅇㄱ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
def test_trig(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄷ ㄴㄱ ㅅㅎㄷ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('[ㅈㄱ ㅅㄱ ㅂㄱ ㅁㄱ ㄺ ㄷㄱ ㄴㄱ ㄱ ㄴ ㄷ ㄹ ㅁ ㅂ ㅅ ㅈ] ㅁㅀㅈㄴㄱ [ㅂ ㅅ ㅂ ㅂㅎㄹ (ㄱㅇㄱ ㄷ ㄱㅎㄷ ㄴ ㄷㅎㄷ) ㄱㅎㄷ (ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
_test('ㄱ (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅁ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄴ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅁㄱ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄴㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㄹ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) (ㄹ ㄷ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('(ㅂ ㅅ ㅂ ㅂㅎㄹ ㅅ ㄴㄱ ㅅㅎㄷ ㄱㅎㄷ) (ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) (ㄹ ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅅㅎㄷ) (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷ', 'True')
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅂ ㅂㅎㄹ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ) ㄴ ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㄷㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
_test('ㅂ ㅅ ㅂ ㅂㅎㄹ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㄴ ㅄㅎㄷ ㅂ ㅅ ㅈㄷ ㅂㅎㅀㄴ ㄷ ㅅㅎㄷ) (ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㄷㄱ ㅅㅎㄷ) ㅂ ㅅ ㄱ ㅂㅎㅀㄷㅎ]ㅎㄴ', 'True')
def test_asin(self):
_test = self._assert_execute
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㄴㅅ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
def test_acos(self):
_test = self._assert_execute
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ ㄴ ㅄㅎㄷ[(ㄱㅇㄱ ㅂ ㅅ ㅅㄱ ㅂㅎㅀㄴ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
def test_atan(self):
_test = self._assert_execute
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ ㄴ ㅄㅎㄷ [(ㄱㅇㄱ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄱ [(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴ[(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
_test('ㄴㄱ[(ㄱㅇㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄴ ㅂ ㅅ ㄷㄴ ㅂㅎㅀㄴ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ]ㅎㄴ', 'True')
def test_atan2(self):
_test = self._assert_execute
_test('ㄱ ㄴ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ', '0.0')
_test('ㄱ ㄴㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True')
_test('(ㄴ ㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ) ㄷ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True')
_test('(ㄴㄱ ㄱ ㅂ ㅅ ㄴㄷ ㅂㅎㄹ ㅎㄷ) ㄷㄱ ㄱㅎㄷ (ㅂ ㅅ ㅂ ㅂㅎㄹ)ㄶㄷ', 'True')
_test('[ㄴ ㄴㄱ ㄷ ㄷㄱ ㄹ ㄺ ㅁㅀㅅ] [(ㄱㅇㄱ ㅂ ㅅ ㅅㄴ ㅂㅎㅀㄴ, ㄱㅇㄱ ㅂ ㅅ ㄳ ㅂㅎㅀㄴ, ㅂ ㅅ ㄴㄷ ㅂㅎㅀㄷ) ㄱㅇㄱ (ㅂ ㅅ ㄱ ㅂㅎㄹ)ㅎㄷㅎ] ㅁㄷㅎㄷ (ㄱ ㅁㅂㅎㄴ)ㅎㄴ', 'True')
def test_trunc(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄴ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄱ ㅂㅎㅁ)ㅎㄴ', '-2')
def test_floor(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄴ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄴ ㅂㅎㅁ)ㅎㄴ', '-3')
def test_round(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄴ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄷ ㅂㅎㅁ)ㅎㄴ', '-2')
def test_ceil(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㄴ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '3')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㄹ ㅂㅎㅁ)ㅎㄴ', '-2')
def test_round_to_inf(self):
_test = self._assert_execute
_test('ㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '0')
_test('ㅁ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㅁㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄴ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '1')
_test('ㄴㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-1')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㄹ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '2')
_test('ㄷㄱ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-2')
_test('ㄷ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '3')
_test('ㄷㄱ ㄴㄱ ㅅㅎㄷ ㅂ ㄱㅎㄷ (ㅂ ㅅ ㅂㄹ ㅁ ㅂㅎㅁ)ㅎㄴ', '-3')
|
12335
|
from enum import Enum as _Enum
class UsageType(_Enum):
CONTROL_LINEAR = ()
CONTROL_ON_OFF = ()
CONTROL_MOMENTARY = ()
CONTROL_ONE_SHOT = ()
CONTROL_RE_TRIGGER = ()
DATA_SELECTOR = ()
DATA_STATIC_VALUE = ()
DATA_STATIC_FLAG = ()
DATA_DYNAMIC_VALUE = ()
DATA_DYNAMIC_FLAG = ()
COLLECTION_NAMED_ARRAY = ()
COLLECTION_APPLICATION = ()
COLLECTION_LOGICAL = ()
COLLECTION_PHYSICAL = ()
COLLECTION_USAGE_SWITCH = ()
COLLECTION_USAGE_MODIFIER = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
@classmethod
def control_usage_types(cls):
return (
UsageType.CONTROL_LINEAR,
UsageType.CONTROL_ON_OFF,
UsageType.CONTROL_MOMENTARY,
UsageType.CONTROL_ONE_SHOT,
UsageType.CONTROL_RE_TRIGGER,
)
@classmethod
def data_usage_types(cls):
return (
UsageType.DATA_SELECTOR,
UsageType.DATA_STATIC_VALUE,
UsageType.DATA_STATIC_FLAG,
UsageType.DATA_DYNAMIC_VALUE,
UsageType.DATA_DYNAMIC_FLAG,
)
@classmethod
def collection_usage_types(cls):
return (
UsageType.COLLECTION_NAMED_ARRAY,
            # UsageType.COLLECTION_APPLICATION,  # commented out: used for top-level collections only
UsageType.COLLECTION_LOGICAL,
UsageType.COLLECTION_PHYSICAL,
UsageType.COLLECTION_USAGE_SWITCH,
UsageType.COLLECTION_USAGE_MODIFIER
)
class Usage:
def __init__(self, value, usage_types):
if not isinstance(usage_types, list):
usage_types = [usage_types,]
for usage_type in usage_types:
if not isinstance(usage_type, UsageType):
raise ValueError("usage_type {} is not instance of {}".format(
usage_type.__class__.__name__,
UsageType.__name__)
)
self.value = value
self.usage_types = usage_types
class UsagePage(_Enum):
def __init__(self, item):
if not isinstance(item, Usage):
            raise ValueError("{} is not a valid {}".format(item, self.__class__.__name__))
self.index = item.value & 0xFFFF
self.usage = item
self.usage_types = item.usage_types
@classmethod
def get_usage(cls, value):
for key, member in cls.__members__.items():
if not isinstance(member.value, Usage):
continue
if member.index == value:
return member
raise ValueError("{} is not a valid {}".format(value, cls.__name__))
@classmethod
def _get_usage_page_index(cls):
raise NotImplementedError()
@classmethod
def find_usage_page(cls, value):
if not hasattr(cls, "usage_page_map"):
cls.usage_page_map = {usage_page._get_usage_page_index(): usage_page for usage_page in cls.__subclasses__()}
if value in cls.usage_page_map.keys():
return cls.usage_page_map[value]
        if value not in range(0xFF00, 0xFFFF + 1):
raise ValueError("Reserved or missing usage page 0x{:04X}".format(value))
raise NotImplementedError("Yet to support Vendor defined usage pages")
class UsageRange:
def __init__(self, usage_page: UsagePage.__class__ = None, minimum = None, maximum = None):
self.usage_page = usage_page
self.minimum = minimum
self.maximum = maximum
def get_range(self):
if self.minimum is None or self.maximum is None:
raise ValueError("Usage Minimum and Usage Maximum must be set")
if isinstance(self.minimum, UsagePage):
if not isinstance(self.maximum, UsagePage):
raise ValueError("UsageRange type mismatch in minimum and maximum usages")
self.usage_page = self.minimum.__class__
return [self.usage_page.get_usage(value) for value in range(self.minimum.index & 0xFFFF, (self.maximum.index & 0xFFFF) + 1)]
if self.minimum & ~0xFFFF:
self.usage_page = UsagePage.find_usage_page((self.minimum & ~0xFFFF) >> 16)
return [self.usage_page.get_usage(value) for value in range(self.minimum & 0xFFFF, (self.maximum & 0xFFFF) + 1)]
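# Hedged usage sketch (not part of the original module above): shows how a concrete
# usage page could be declared on top of UsagePage and how UsageRange.get_range()
# expands an inclusive range. The class name and the Generic Desktop values are
# assumptions for illustration only.
class GenericDesktopUsage(UsagePage):
    POINTER = Usage(0x00010001, UsageType.COLLECTION_PHYSICAL)
    MOUSE = Usage(0x00010002, UsageType.COLLECTION_APPLICATION)
    X = Usage(0x00010030, UsageType.DATA_DYNAMIC_VALUE)
    Y = Usage(0x00010031, UsageType.DATA_DYNAMIC_VALUE)

    @classmethod
    def _get_usage_page_index(cls):
        return 0x0001


def _usage_demo():
    # Look a usage up by its 16-bit index on the page.
    assert GenericDesktopUsage.get_usage(0x30) is GenericDesktopUsage.X
    # Resolve a page class from its usage-page index.
    assert UsagePage.find_usage_page(0x0001) is GenericDesktopUsage
    # Expand an inclusive usage range into concrete members.
    rng = UsageRange(minimum=GenericDesktopUsage.X, maximum=GenericDesktopUsage.Y)
    assert rng.get_range() == [GenericDesktopUsage.X, GenericDesktopUsage.Y]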
|
12344
|
import time, copy
import asyncio
class TempRefManager:
def __init__(self):
self.refs = []
self.running = False
def add_ref(self, ref, lifetime, on_shutdown):
expiry_time = time.time() + lifetime
self.refs.append((ref, expiry_time, on_shutdown))
def purge_all(self):
"""Purges all refs, regardless of expiry time
Only call this when Seamless is shutting down"""
while len(self.refs):
ref, _, on_shutdown = self.refs.pop(0)
if not on_shutdown:
continue
try:
ref()
except:
pass
def purge(self):
"""Purges expired refs"""
t = time.time()
for item in copy.copy(self.refs):
ref, expiry_time, _ = item
if expiry_time < t:
self.refs.remove(item)
ref()
async def loop(self):
if self.running:
return
self.running = True
while 1:
try:
self.purge()
except Exception:
import traceback
traceback.print_exc()
await asyncio.sleep(0.05)
self.running = False
temprefmanager = TempRefManager()
coro = temprefmanager.loop()
task = asyncio.ensure_future(coro)
import atexit
atexit.register(lambda *args, **kwargs: task.cancel())
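# Hedged usage sketch (not part of the original module above): the lifecycle of a
# temporary ref handled outside the asyncio loop. The callback and the 0.1 s
# lifetime are illustrative values.
def _tempref_demo():
    mgr = TempRefManager()
    mgr.add_ref(lambda: print("ref released"), lifetime=0.1, on_shutdown=True)
    time.sleep(0.2)
    mgr.purge()      # expired entries are removed and their callbacks invoked
    mgr.purge_all()  # nothing left here; normally used once at shutdown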
|
12354
|
import torch
import json
import os
from torch.utils.data import DataLoader,Dataset
import torchvision.transforms as transforms
from PIL import Image
import numpy as np
data_folder = "./dataset/images"
press_times = json.load(open("./dataset/dataset.json"))
image_roots = [os.path.join(data_folder,image_file) \
for image_file in os.listdir(data_folder)]
class JumpDataset(Dataset):
def __init__(self,transform = None):
self.image_roots = image_roots
self.press_times = press_times
self.transform = transform
def __len__(self):
return len(self.image_roots)
def __getitem__(self,idx):
image_root = self.image_roots[idx]
image_name = image_root.split("/")[-1]
image = Image.open(image_root)
image = image.convert('RGB')
image = image.resize((224,224), resample=Image.LANCZOS)
#image = np.array(image, dtype=np.float32)
if self.transform is not None:
image = self.transform(image)
press_time = self.press_times[image_name]
return image,press_time
def jump_data_loader():
normalize = transforms.Normalize(mean=[0.92206, 0.92206, 0.92206], std=[0.08426, 0.08426, 0.08426])
transform = transforms.Compose([transforms.ToTensor(),normalize])
dataset = JumpDataset(transform=transform)
return DataLoader(dataset,batch_size = 32,shuffle = True)
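# Hedged usage sketch (not part of the original snippet above): pulls one batch from
# the loader; assumes ./dataset/images and ./dataset/dataset.json exist in the layout
# the module expects.
if __name__ == "__main__":
    loader = jump_data_loader()
    images, press_times = next(iter(loader))
    print(images.shape)    # torch.Size([32, 3, 224, 224]) for a full batch
    print(press_times[:5])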
|
12355
|
import json
from grafana_backup.dashboardApi import create_snapshot
def main(args, settings, file_path):
grafana_url = settings.get('GRAFANA_URL')
http_post_headers = settings.get('HTTP_POST_HEADERS')
verify_ssl = settings.get('VERIFY_SSL')
client_cert = settings.get('CLIENT_CERT')
debug = settings.get('DEBUG')
with open(file_path, 'r') as f:
data = f.read()
snapshot = json.loads(data)
try:
snapshot['name'] = snapshot['dashboard']['title']
except KeyError:
snapshot['name'] = "Untitled Snapshot"
(status, content) = create_snapshot(json.dumps(snapshot), grafana_url, http_post_headers, verify_ssl, client_cert, debug)
if status == 200:
print("create snapshot: {0}, status: {1}, msg: {2}".format(snapshot['name'], status, content))
else:
print("creating snapshot {0} failed with status {1}".format(snapshot['name'], status))
|
12361
|
from numpy import array, rad2deg, pi, mgrid, argmin
from matplotlib.pylab import contour
import matplotlib.pyplot as plt
import mplstereonet
from obspy.imaging.beachball import aux_plane
from focal_mech.lib.classify_mechanism import classify, translate_to_sphharm
from focal_mech.io.read_hash import read_demo, read_hash_solutions
from focal_mech.util.hash_routines import hash_to_classifier
from focal_mech.lib.sph_harm import get_sph_harm
from focal_mech.lib.correlate import corr_shear
hash_solns = read_hash_solutions("example1.out")
# we want solutions that are symmetric
polarity_data = read_demo("north1.phase", "scsn.reverse", reverse=True)
inputs = hash_to_classifier(polarity_data, parity=1)
event = 3146815
result = classify(*inputs[event], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth1 = c.collections[0].get_paths()[0].vertices
pth1 = rad2deg(pth1)
pth2 = c.collections[0].get_paths()[1].vertices
pth2 = rad2deg(pth2)
hash_focal = rad2deg(hash_solns[event])
event2 = 3158361
result = classify(*inputs[event2], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln2, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth3 = c.collections[0].get_paths()[0].vertices
pth3 = rad2deg(pth3)
pth4 = c.collections[0].get_paths()[1].vertices
pth4 = rad2deg(pth4)
hash_focal2 = rad2deg(hash_solns[event2])
event3 = 3153955
result = classify(*inputs[event3], kernel_degree=2)
Alm = translate_to_sphharm(*result, kernel_degree=2)
coeffs = array([Alm[0,0],
Alm[1,-1], Alm[1,0], Alm[1,1],
Alm[2,-2], Alm[2,-1], Alm[2,0], Alm[2,1], Alm[2,2]])
svm_soln3, f = corr_shear(Alm)
resolution = (200,400)
longi, lati, Z = get_sph_harm(resolution=resolution)
mech = coeffs.dot(Z).real
longi.shape = resolution
lati.shape = resolution
mech.shape = resolution
c = contour(longi, lati, mech, [0])
pth5 = c.collections[0].get_paths()[0].vertices
pth5 = rad2deg(pth5)
pth6 = c.collections[0].get_paths()[1].vertices
pth6 = rad2deg(pth6)
hash_focal3 = rad2deg(hash_solns[event3])
fig = plt.figure(facecolor="white", figsize=(10,20))
ax = fig.add_subplot(221, projection='stereonet')
ax.rake(pth1[:,0], pth1[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth2[:,0], pth2[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event][:,0])
toa = rad2deg(polarity_data[event][:,1])
polarity = polarity_data[event][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(222, projection='stereonet')
ax.rake(pth3[:,0], pth3[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth4[:,0], pth4[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln2
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln2)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal2
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal2)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event2][:,0])
toa = rad2deg(polarity_data[event2][:,1])
polarity = polarity_data[event2][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
ax = fig.add_subplot(224, projection='stereonet')
ax.rake(pth5[:,0], pth5[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
ax.rake(pth6[:,0], pth6[:,1] +90.0, 90.0, ':', color='red', linewidth=3)
strike, dip, rake = svm_soln3
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = aux_plane(*svm_soln3)
ax.plane(strike, dip, '-r', linewidth=2)
strike, dip, rake = hash_focal3
ax.plane(strike-90, dip, 'g-', linewidth=2)
strike, dip, rake = aux_plane(*hash_focal3)
ax.plane(strike-90, dip,'g-', linewidth=2)
azi = rad2deg(polarity_data[event3][:,0])
toa = rad2deg(polarity_data[event3][:,1])
polarity = polarity_data[event3][:,2]
for a, t, p in zip(azi, toa, polarity):
if p > 0:
ax.pole(a, t,'o', markeredgecolor='red', markerfacecolor='red')
else:
ax.pole(a, t,'o', markeredgecolor='blue', markerfacecolor='white')
ax.grid()
plt.tight_layout(pad=4.0, h_pad=20.0)
plt.show()
|
12443
|
import math
from mathutils import Euler
import bpy
from .portal2_entity_classes import *
from .portal_entity_handlers import PortalEntityHandler
local_entity_lookup_table = PortalEntityHandler.entity_lookup_table.copy()
local_entity_lookup_table.update(entity_class_handle)
class Portal2EntityHandler(PortalEntityHandler):
entity_lookup_table = local_entity_lookup_table
pointlight_power_multiplier = 1000
def handle_prop_weighted_cube(self, entity: prop_weighted_cube, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_weighted_cube', obj, 'props')
def handle_prop_testchamber_door(self, entity: prop_testchamber_door, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_testchamber_door', obj, 'props')
def handle_prop_floor_button(self, entity: prop_floor_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_button', obj, 'props')
def handle_prop_floor_ball_button(self, entity: prop_floor_ball_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_ball_button', obj, 'props')
def handle_prop_floor_cube_button(self, entity: prop_floor_cube_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_floor_cube_button', obj, 'props')
def handle_prop_under_floor_button(self, entity: prop_under_floor_button, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_under_floor_button', obj, 'props')
def handle_prop_tractor_beam(self, entity: prop_tractor_beam, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_tractor_beam', obj, 'props')
def handle_logic_playmovie(self, entity: logic_playmovie, entity_raw: dict):
obj = bpy.data.objects.new(self._get_entity_name(entity), None)
self._set_location(obj, entity.origin)
self._set_icon_if_present(obj, entity)
self._set_entity_data(obj, {'entity': entity_raw})
self._put_into_collection('logic_playmovie', obj, 'logic')
def handle_trigger_paint_cleanser(self, entity: trigger_paint_cleanser, entity_raw: dict):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection('trigger_paint_cleanser', mesh_object, 'triggers')
def handle_trigger_catapult(self, entity: trigger_catapult, entity_raw: dict):
if 'model' not in entity_raw:
return
model_id = int(entity_raw.get('model')[1:])
mesh_object = self._load_brush_model(model_id, self._get_entity_name(entity))
self._set_location_and_scale(mesh_object, parse_float_vector(entity_raw.get('origin', '0 0 0')))
self._set_rotation(mesh_object, parse_float_vector(entity_raw.get('angles', '0 0 0')))
self._set_entity_data(mesh_object, {'entity': entity_raw})
self._put_into_collection('trigger_catapult', mesh_object, 'triggers')
def handle_npc_wheatley_boss(self, entity: npc_wheatley_boss, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('npc_wheatley_boss', obj, 'npc')
def handle_prop_exploding_futbol(self, entity: prop_exploding_futbol, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol', obj, 'props')
def handle_prop_exploding_futbol_socket(self, entity: prop_exploding_futbol_socket, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
        self._put_into_collection('prop_exploding_futbol_socket', obj, 'props')
    def handle_prop_exploding_futbol_spawner(self, entity: prop_exploding_futbol_spawner, entity_raw: dict):
obj = self._handle_entity_with_model(entity, entity_raw)
self._put_into_collection('prop_exploding_futbol_spawner', obj, 'props')
|
12454
|
import sys
import webbrowser
import os
from comicstreamerlib.folders import AppFolders
from PyQt4 import QtGui,QtCore
class SystemTrayIcon(QtGui.QSystemTrayIcon):
def __init__(self, icon, app):
QtGui.QSystemTrayIcon.__init__(self, icon, None)
self.app = app
self.menu = QtGui.QMenu(None)
exitAction = self.menu.addAction("Exit")
self.setContextMenu(self.menu)
exitAction.triggered.connect( self.quit )
def quit(self):
QtCore.QCoreApplication.quit()
class QtBasedGui():
def __init__(self, apiServer):
self.apiServer = apiServer
self.app = QtGui.QApplication(sys.argv)
pixmap = QtGui.QPixmap(AppFolders.imagePath("trout.png"))
icon = QtGui.QIcon( pixmap.scaled(16,16))
self.trayIcon = SystemTrayIcon(icon,self)
self.trayIcon.show()
def run(self):
try:
self.app.exec_()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
    QtBasedGui(None).run()
|
12532
|
from urllib.parse import urlparse
from quart import current_app as app, request, jsonify
def filter_referrers():
filters = app.config.get('REFERRERS_FILTER')
if not filters:
return None
referrer = request.referrer
if referrer:
parsed = urlparse(referrer)
for filter in filters:
if parsed.hostname.endswith(filter):
return None
return jsonify({
'ok': False,
'error': 'Unauthorized',
}), 403
|
12535
|
from django.contrib import admin
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from . import models
if settings.HAS_ADDITIONAL_USER_DATA:
try:
class UserProfileInline(admin.TabularInline):
model = models.UserProfile
extra = 0
    except Exception as e:
        raise ImproperlyConfigured("User/admin.py: HAS_ADDITIONAL_USER_DATA is enabled but the UserProfile inline could not be configured.") from e
class UserAdmin(admin.ModelAdmin):
list_display = ['get_full_name', 'email', 'is_verified']
search_fields = ['get_full_name', 'email', 'date_joined', 'username']
list_filter = ('groups',)
if settings.HAS_ADDITIONAL_USER_DATA:
inlines = [ UserProfileInline, ]
def save_model(self, request, obj, form, change):
if 'password' in form.changed_data:
obj.set_password(request.POST['password'])
obj.save()
admin.site.register(models.User, UserAdmin)
admin.site.register(models.IpAddress)
admin.site.register(models.CityFromIpAddress)
admin.site.register(models.Marketing)
|
12558
|
import data_algebra
import data_algebra.test_util
from data_algebra.data_ops import * # https://github.com/WinVector/data_algebra
import data_algebra.util
import data_algebra.SQLite
def test_compount_where_and():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" and b == "c" and x > 0 and y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
def test_compount_where_amp():
d = data_algebra.default_data_model.pd.DataFrame(
{
"a": ["a", "b", None, None],
"b": ["c", None, "d", None],
"x": [1, 2, None, None],
"y": [3, None, 4, None],
}
)
ops = describe_table(d, table_name="d").select_rows(
'a == "a" & b == "c" & x > 0 & y < 4'
)
db_handle = data_algebra.SQLite.SQLiteModel().db_handle(conn=None)
sql = db_handle.to_sql(ops)
assert isinstance(sql, str)
expect = data_algebra.default_data_model.pd.DataFrame(
{"a": ["a"], "b": ["c"], "x": [1.0], "y": [3.0],}
)
data_algebra.test_util.check_transform(ops=ops, data=d, expect=expect)
|
12592
|
from pheasant.renderers.jupyter.jupyter import Jupyter
jupyter = Jupyter()
jupyter.findall("{{3}}3{{5}}")
jupyter.page
|
12598
|
import tensorflow as tf
from ..fastspeech.model import (
TFFastSpeechEncoder,
TFTacotronPostnet,
TFFastSpeechLayer,
)
from ..speechsplit.model import InterpLnr
import numpy as np
import copy
class Encoder_6(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_6, self).__init__(name='Encoder_6', **kwargs)
self.dim_neck_3 = hparams.dim_neck_3
self.freq_3 = hparams.freq_3
self.dim_f0 = hparams.dim_f0
self.dim_enc_3 = hparams.dim_enc_3
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_1'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc_3
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_1',
)
self.interp = InterpLnr(hparams)
def call(self, x, attention_mask, training=True):
x = self.before_dense_1(x)
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
x = self.interp(
x,
tf.tile([tf.shape(x)[1]], [tf.shape(x)[0]]),
training=training,
)
x = self.encoder_dense_1(x)
return x
class Encoder_7(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_7, self).__init__(name='Encoder_7', **kwargs)
self.config = config
self.dim_neck = hparams.dim_neck
self.dim_neck_3 = hparams.dim_neck_3
self.dim_freq = hparams.dim_freq
self.dim_enc = hparams.dim_enc
self.dim_enc_3 = hparams.dim_enc_3
self.before_dense_1 = tf.keras.layers.Dense(
units=self.dim_enc, dtype=tf.float32, name='before_dense_1'
)
self.before_dense_2 = tf.keras.layers.Dense(
units=self.dim_enc_3, dtype=tf.float32, name='before_dense_2'
)
config_1 = copy.deepcopy(config)
config_1.hidden_size = self.dim_enc
self.layer_1 = [
TFFastSpeechLayer(config_1, name='layer_._{}'.format(i))
for i in range(config_1.num_hidden_layers)
]
config_2 = copy.deepcopy(config)
config_2.hidden_size = self.dim_enc_3
self.layer_2 = [
TFFastSpeechLayer(config_2, name='layer_._{}'.format(i))
for i in range(config_2.num_hidden_layers)
]
self.encoder_dense_1 = tf.keras.layers.Dense(
units=self.dim_neck, dtype=tf.float32, name='encoder_dense_1'
)
self.encoder_dense_2 = tf.keras.layers.Dense(
units=self.dim_neck_3,
dtype=tf.float32,
name='encoder_dense_2',
)
self.interp = InterpLnr(hparams)
def call(self, x_f0, attention_mask, training=True):
x = x_f0[:, :, : self.dim_freq]
f0 = x_f0[:, :, self.dim_freq:]
x = self.before_dense_1(x)
f0 = self.before_dense_2(f0)
seq_length = tf.shape(x_f0)[1]
for no, layer_module in enumerate(self.layer_1):
x = layer_module([x, attention_mask], training=training)[0]
f0 = self.layer_2[no]([f0, attention_mask], training=training)[0]
x_f0 = tf.concat((x, f0), axis=2)
x_f0 = self.interp(
x_f0,
tf.tile([tf.shape(x_f0)[1]], [tf.shape(x)[0]]),
training=training,
)
        x = x_f0[:, :, : self.dim_enc]
        f0 = x_f0[:, :, self.dim_enc:]
x = self.encoder_dense_1(x)
f0 = self.encoder_dense_2(f0)
return x, f0
class Encoder_t(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Encoder_t, self).__init__(name='Encoder_t', **kwargs)
self.dim_neck_2 = hparams.dim_neck_2
self.freq_2 = hparams.freq_2
self.dim_freq = hparams.dim_freq
self.dim_enc_2 = hparams.dim_enc_2
self.dim_emb = hparams.dim_spk_emb
self.chs_grp = hparams.chs_grp
config = copy.deepcopy(config)
config.num_hidden_layers = 1
config.hidden_size = self.dim_enc_2
self.config = config
self.before_dense = tf.keras.layers.Dense(
units=self.dim_enc_2, dtype=tf.float32, name='before_dense_1'
)
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.encoder_dense = tf.keras.layers.Dense(
units=self.dim_neck_2, dtype=tf.float32, name='encoder_dense'
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.encoder_dense(f)
class Decoder_3(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_3, self).__init__(name='Decoder_3', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_freq,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Decoder_4(tf.keras.layers.Layer):
def __init__(self, config, hparams, **kwargs):
super(Decoder_4, self).__init__(name='Decoder_4', **kwargs)
self.config = config
self.encoder = TFFastSpeechEncoder(config, name='encoder')
self.before_dense = tf.keras.layers.Dense(
units=config.hidden_size,
dtype=tf.float32,
name='before_dense_1',
)
self.linear_projection = tf.keras.layers.Dense(
units=hparams.dim_f0,
dtype=tf.float32,
name='self.linear_projection',
)
def call(self, x, attention_mask, training=True):
x = self.before_dense(x)
seq_length = tf.shape(x)[1]
f = self.encoder([x, attention_mask], training=training)[0]
return self.linear_projection(f)
class Model(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model, self).__init__(name='speechsplit', **kwargs)
self.encoder_1 = Encoder_7(
config.encoder_self_attention_params, hparams
)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_3(config.decoder_self_attention_params, hparams)
self.freq = hparams.freq
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_f0, x_org, c_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_x, codes_f0 = self.encoder_1(
x_f0, attention_mask, training=training
)
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_1 = codes_x
code_exp_3 = codes_f0
code_exp_2 = codes_2
c_trg = tf.tile(tf.expand_dims(c_trg, 1), (1, tf.shape(x_f0)[1], 1))
encoder_outputs = tf.concat(
(code_exp_1, code_exp_2, code_exp_3, c_trg), axis=-1
)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_x, codes_f0, codes_2, encoder_outputs, mel_outputs
class Model_F0(tf.keras.Model):
def __init__(self, config, hparams, **kwargs):
super(Model_F0, self).__init__(name='speechsplit_f0', **kwargs)
self.encoder_2 = Encoder_t(
config.encoder_self_attention_params, hparams
)
self.encoder_3 = Encoder_6(
config.encoder_self_attention_params, hparams
)
self.decoder = Decoder_4(config.decoder_self_attention_params, hparams)
self.freq_2 = hparams.freq_2
self.freq_3 = hparams.freq_3
def call(self, x_org, f0_trg, mel_lengths, training=True):
max_length = tf.cast(tf.reduce_max(mel_lengths), tf.int32)
attention_mask = tf.sequence_mask(
lengths=mel_lengths, maxlen=max_length, dtype=tf.float32
)
attention_mask.set_shape((None, None))
codes_2 = self.encoder_2(x_org, attention_mask, training=training)
code_exp_2 = codes_2
codes_3 = self.encoder_3(f0_trg, attention_mask, training=training)
code_exp_3 = codes_3
self.o = [code_exp_2, code_exp_3]
encoder_outputs = tf.concat((code_exp_2, code_exp_3), axis=-1)
mel_outputs = self.decoder(
encoder_outputs, attention_mask, training=training
)
return codes_2, codes_3, encoder_outputs, mel_outputs
|
12605
|
from ..utilities import (
has_same_structure,
is_equivalent_molecule,
is_equivalent_building_block,
are_equivalent_functional_groups,
)
def test_with_functional_groups(building_block, get_functional_groups):
"""
Test :meth:`.BuildingBlock.with_functional_groups`.
Parameters
----------
building_block : :class:`.BuildingBlock`
The building block to test.
get_functional_groups : :class:`callable`
Takes a single parameter, `building_block` and returns the
`functional_groups` parameter to use for this test.
Returns
-------
None : :class:`NoneType`
"""
# Save clone to check immutability.
clone = building_block.clone()
_test_with_functional_groups(
building_block=building_block,
functional_groups=tuple(get_functional_groups(building_block)),
)
is_equivalent_building_block(building_block, clone)
has_same_structure(building_block, clone)
def _test_with_functional_groups(building_block, functional_groups):
"""
Test :meth:`.BuildingBlock.with_functional_groups`.
Parameters
----------
building_block : :class:`.BuildingBlock`
The building block to test.
functional_groups : :class:`tuple` of :class:`.FunctionalGroup`
The functional groups the new building block should hold.
Returns
-------
None : :class:`NoneType`
"""
new = building_block.with_functional_groups(functional_groups)
are_equivalent_functional_groups(
new.get_functional_groups(),
functional_groups,
)
is_equivalent_molecule(building_block, new)
has_same_structure(building_block, new)
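# Hedged usage sketch (not part of the original test module above): how this test
# might be driven by hand, outside the pytest fixtures that normally supply its
# arguments. The SMILES string and factory are assumptions about the stk API, and
# the equivalence helpers still come from the package's test utilities imported above.
def _manual_example():
    import stk
    bb = stk.BuildingBlock('NCCN', [stk.PrimaryAminoFactory()])
    test_with_functional_groups(
        building_block=bb,
        get_functional_groups=lambda b: b.get_functional_groups(),
    )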
|
12651
|
from pathlib import Path
import logging
from .logger import Logger
from .log_formatter import LogFormatter
class FileLogger(Logger):
fmt = LogFormatter(use_colour=False, output_ts=False)
logger = None
def __init__(self, folder, format=None):
if format is None:
format = ("%(asctime)s|%(levelname)s|%(message)s",)
formatter = logging.Formatter(format)
log_file = Path(folder, "sayn.log")
if not log_file.parent.exists():
log_file.parent.mkdir(parents=True)
handler = logging.FileHandler(log_file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
self.logger = logger
def print(self, s=None):
if s is not None:
if s["level"] == "info":
func = self.logger.info
elif s["level"] == "error":
func = self.logger.error
elif s["level"] == "warning":
func = self.logger.warning
else:
func = self.logger.debug
s = s["message"]
if isinstance(s, str):
s = [s]
elif not isinstance(s, list):
raise ValueError("error in logging print")
func(f"{s[0]}")
for e in s[1:]:
for l in e.split("\n"):
func(f"{l}")
|
12654
|
from flask import request, session, url_for
from requests_oauthlib import OAuth2Session
class OAuth2Login(object):
def __init__(self, app=None):
if app:
self.init_app(app)
self.app = app
def get_config(self, app, name, default_value=None):
return app.config.get(self.config_prefix + name, default_value)
def init_app(self, app):
self.client_id = self.get_config(app, "CLIENT_ID")
self.client_secret = self.get_config(app, "CLIENT_SECRET")
self.scope = self.get_config(app, "SCOPE", self.default_scope).split(",")
self.redirect_scheme = self.get_config(app, "REDIRECT_SCHEME", "https")
app.add_url_rule(
self.get_config(app, "REDIRECT_PATH", self.default_redirect_path),
self.redirect_endpoint,
self.login,
)
@property
def redirect_uri(self):
return url_for(
self.redirect_endpoint,
_external=True,
_scheme=self.redirect_scheme,
)
def session(self):
return OAuth2Session(
self.client_id,
redirect_uri=self.redirect_uri,
scope=self.scope,
)
def authorization_url(self, **kwargs):
sess = self.session()
auth_url, state = sess.authorization_url(self.auth_url, **kwargs)
session[self.state_session_key] = state
return auth_url
def login(self):
sess = self.session()
# Get token
try:
sess.fetch_token(
self.token_url,
code=request.args["code"],
client_secret=self.client_secret,
)
# TODO: Check state
except Warning:
# Ignore warnings
pass
except Exception as e:
return self.login_failure_func(e)
# Get profile
try:
profile = self.get_profile(sess)
except Exception as e:
return self.login_failure_func(e)
return self.login_success_func(sess.token, profile)
def login_success(self, f):
self.login_success_func = f
return f
def login_failure(self, f):
self.login_failure_func = f
return f
def get_profile(self, sess):
raise NotImplementedError
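# Hedged sketch of a concrete provider (not part of the original module above):
# OAuth2Login expects subclasses to supply the attributes below plus get_profile();
# every URL, prefix and endpoint name here is a placeholder, not a real provider.
class ExampleOAuth2Login(OAuth2Login):
    config_prefix = "EXAMPLE_"
    default_scope = "profile,email"
    default_redirect_path = "/oauth2/example"
    redirect_endpoint = "example_oauth2_redirect"
    state_session_key = "example_oauth2_state"
    auth_url = "https://auth.example.com/authorize"
    token_url = "https://auth.example.com/token"

    def get_profile(self, sess):
        return sess.get("https://api.example.com/me").json()

# Typical wiring (sketch): instantiate with a Flask/Quart-style app object, then
# register handlers via the login_success/login_failure decorators defined above.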
|
12671
|
from typing import List
from pydantic import BaseModel
from icolos.core.containers.compound import Conformer, unroll_conformers
from icolos.utils.enums.step_enums import StepRMSDEnum, StepDataManipulationEnum
from icolos.core.workflow_steps.step import _LE
from icolos.core.workflow_steps.calculation.base import StepCalculationBase
_SR = StepRMSDEnum()
_SDM = StepDataManipulationEnum()
class StepRMSD(StepCalculationBase, BaseModel):
def __init__(self, **data):
super().__init__(**data)
# extend parameters
if _SR.METHOD not in self.settings.additional.keys():
self.settings.additional[_SR.METHOD] = _SR.METHOD_ALIGNMOL
def _calculate_RMSD(self, conformers: List[Conformer]):
for conf in conformers:
rmsd_matrix = self._calculate_rms_matrix(
conformers=[conf] + conf.get_extra_data()[_SDM.KEY_MATCHED],
rms_method=self._get_rms_method(),
)
# use the specified tag name if it is the first value and append an index in case there are more
for idx, col in enumerate(rmsd_matrix.columns[1:]):
combined_tag = "".join([_SR.RMSD_TAG, "" if idx == 0 else str(idx)])
rmsd_value = rmsd_matrix.iloc[[0]][col][0]
conf.get_molecule().SetProp(combined_tag, str(rmsd_value))
conf.get_extra_data()[_SDM.KEY_MATCHED][idx].get_molecule().SetProp(
combined_tag, str(rmsd_value)
)
def execute(self):
# this assumes that the conformers that are to be matched for the calculation of the RMSD matrix, are attached
# as a list in a generic data field with a specified key
conformers = unroll_conformers(compounds=self.get_compounds())
self._calculate_RMSD(conformers=conformers)
self._logger.log(
f"Annotated {len(conformers)} conformers with RMSD values (tag: {_SR.RMSD_TAG}).",
_LE.INFO,
)
# TODO: add a nice pandas DF with the RMSD values to a generic data field
|
12678
|
from enum import Enum
import pytest
import gino
from gino.dialects.aiomysql import AsyncEnum
pytestmark = pytest.mark.asyncio
db = gino.Gino()
class MyEnum(Enum):
ONE = "one"
TWO = "two"
class Blog(db.Model):
__tablename__ = "s_blog"
id = db.Column(db.BigInteger(), primary_key=True)
title = db.Column(db.Unicode(255), index=True, comment="Title Comment")
visits = db.Column(db.BigInteger(), default=0)
comment_id = db.Column(db.ForeignKey("s_comment.id"))
number = db.Column(db.Enum(MyEnum), nullable=False, default=MyEnum.TWO)
number2 = db.Column(AsyncEnum(MyEnum), nullable=False, default=MyEnum.TWO)
class Comment(db.Model):
__tablename__ = "s_comment"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id", name="blog_id_fk"))
blog_seq = db.Sequence("blog_seq", metadata=db, schema="schema_test")
async def test(engine, define=True):
async with engine.acquire() as conn:
assert not await engine.dialect.has_table(conn, "non_exist")
Blog.__table__.comment = "Blog Comment"
db.bind = engine
await db.gino.create_all()
await Blog.number.type.create_async(engine, checkfirst=True)
await Blog.number2.type.create_async(engine, checkfirst=True)
await db.gino.create_all(tables=[Blog.__table__], checkfirst=True)
await blog_seq.gino.create(checkfirst=True)
await Blog.__table__.gino.create(checkfirst=True)
await db.gino.drop_all()
await db.gino.drop_all(tables=[Blog.__table__], checkfirst=True)
await Blog.__table__.gino.drop(checkfirst=True)
await blog_seq.gino.drop(checkfirst=True)
if define:
class Comment2(db.Model):
__tablename__ = "s_comment_2"
id = db.Column(db.BigInteger(), primary_key=True)
blog_id = db.Column(db.ForeignKey("s_blog.id"))
await db.gino.create_all()
await db.gino.drop_all()
|
12685
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (HVAC_MODE_AUTO,
PRESET_AWAY,
PRESET_COMFORT, PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE)
from homeassistant.const import TEMP_CELSIUS
HEATZY_TO_HA_STATE = {
'\u8212\u9002': PRESET_COMFORT,
'\u7ecf\u6d4e': PRESET_ECO,
'\u89e3\u51bb': PRESET_AWAY,
'\u505c\u6b62': PRESET_NONE,
}
HA_TO_HEATZY_STATE = {
PRESET_COMFORT: [1, 1, 0],
PRESET_ECO: [1, 1, 1],
PRESET_AWAY: [1, 1, 2],
PRESET_NONE: [1, 1, 3],
}
class HeatzyPiloteV1Thermostat(ClimateEntity):
def __init__(self, api, device):
self._api = api
self._device = device
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE
@property
def unique_id(self):
"""Return a unique ID."""
return self._device.get('did')
@property
def name(self):
return self._device.get('dev_alias')
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return [
HVAC_MODE_AUTO
]
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
return HVAC_MODE_AUTO
@property
def preset_modes(self):
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return [
PRESET_NONE,
PRESET_COMFORT,
PRESET_ECO,
PRESET_AWAY
]
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
return HEATZY_TO_HA_STATE.get(self._device.get('attr').get('mode'))
async def async_set_preset_mode(self, preset_mode):
"""Set new preset mode."""
await self._api.async_control_device(self.unique_id, {
'raw': HA_TO_HEATZY_STATE.get(preset_mode),
})
await self.async_update()
async def async_update(self):
"""Retrieve latest state."""
self._device = await self._api.async_get_device(self.unique_id)
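# Hedged usage sketch (not part of the original integration above): drives the entity
# with a stub API outside Home Assistant; the device payloads and ids are invented.
# After the stubbed refresh the preset reflects whatever mode the "device" reports.
import asyncio

class _StubHeatzyApi:
    async def async_control_device(self, did, payload):
        print("control", did, payload)
    async def async_get_device(self, did):
        return {'did': did, 'dev_alias': 'living room', 'attr': {'mode': '\u8212\u9002'}}

async def _heatzy_demo():
    entity = HeatzyPiloteV1Thermostat(_StubHeatzyApi(), {'did': 'abc123', 'dev_alias': 'living room'})
    await entity.async_set_preset_mode(PRESET_ECO)
    print(entity.preset_mode)  # PRESET_COMFORT, taken from the refreshed device state

if __name__ == "__main__":
    asyncio.run(_heatzy_demo())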
|
12699
|
from transformer import Encoder
from torch import nn,optim
from torch.nn.functional import cross_entropy,softmax, relu
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch
import utils
import os
import pickle
class GPT(nn.Module):
def __init__(self, model_dim, max_len, num_layer, num_head, n_vocab, lr, max_seg=3, drop_rate=0.2,padding_idx=0):
super().__init__()
self.padding_idx = padding_idx
self.n_vocab = n_vocab
self.max_len = max_len
self.word_emb = nn.Embedding(n_vocab,model_dim)
self.word_emb.weight.data.normal_(0,0.1)
self.segment_emb = nn.Embedding(num_embeddings= max_seg, embedding_dim=model_dim)
self.segment_emb.weight.data.normal_(0,0.1)
self.position_emb = torch.empty(1,max_len,model_dim)
nn.init.kaiming_normal_(self.position_emb,mode='fan_out', nonlinearity='relu')
self.position_emb = nn.Parameter(self.position_emb)
self.encoder = Encoder(n_head=num_head, emb_dim=model_dim, drop_rate=drop_rate, n_layer=num_layer)
self.task_mlm = nn.Linear(in_features=model_dim, out_features=n_vocab)
self.task_nsp = nn.Linear(in_features=model_dim*self.max_len, out_features=2)
self.opt = optim.Adam(self.parameters(),lr)
def forward(self,seqs, segs, training=False):
embed = self.input_emb(seqs, segs)
z = self.encoder(embed, training, mask = self.mask(seqs)) # [n, step, model_dim]
mlm_logits = self.task_mlm(z) # [n, step, n_vocab]
nsp_logits = self.task_nsp(z.reshape(z.shape[0],-1)) # [n, n_cls]
return mlm_logits, nsp_logits
def step(self, seqs, segs, seqs_, nsp_labels):
self.opt.zero_grad()
mlm_logits, nsp_logits = self(seqs, segs, training=True)
pred_loss = cross_entropy(mlm_logits.reshape(-1,self.n_vocab),seqs_.reshape(-1))
nsp_loss = cross_entropy(nsp_logits,nsp_labels.reshape(-1))
loss = pred_loss + 0.2 * nsp_loss
loss.backward()
self.opt.step()
return loss.cpu().data.numpy(), mlm_logits
def input_emb(self,seqs, segs):
# device = next(self.parameters()).device
# self.position_emb = self.position_emb.to(device)
return self.word_emb(seqs) + self.segment_emb(segs) + self.position_emb
def mask(self, seqs):
device = next(self.parameters()).device
batch_size, seq_len = seqs.shape
mask = torch.triu(torch.ones((seq_len,seq_len), dtype=torch.long), diagonal=1).to(device) # [seq_len ,seq_len]
pad = torch.eq(seqs,self.padding_idx) # [n, seq_len]
mask = torch.where(pad[:,None,None,:],1,mask[None,None,:,:]).to(device) # [n, 1, seq_len, seq_len]
return mask>0 # [n, 1, seq_len, seq_len]
@property
def attentions(self):
attentions = {
"encoder": [l.mh.attention.cpu().data.numpy() for l in self.encoder.encoder_layers]
}
return attentions
def train():
MODEL_DIM = 256
N_LAYER = 4
LEARNING_RATE = 1e-4
dataset = utils.MRPCData("./MRPC",2000)
print("num word: ",dataset.num_word)
model = GPT(
model_dim=MODEL_DIM, max_len=dataset.max_len-1, num_layer=N_LAYER, num_head=4, n_vocab=dataset.num_word,
lr=LEARNING_RATE, max_seg=dataset.num_seg, drop_rate=0.2, padding_idx=dataset.pad_id
)
if torch.cuda.is_available():
print("GPU train avaliable")
device =torch.device("cuda")
model = model.cuda()
else:
device = torch.device("cpu")
model = model.cpu()
loader = DataLoader(dataset,batch_size=32,shuffle=True)
for epoch in range(100):
for batch_idx, batch in enumerate(loader):
seqs, segs,xlen,nsp_labels = batch
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
# pred: [n, step, n_vocab]
loss,pred = model.step(seqs=seqs[:,:-1], segs= segs[:,:-1], seqs_=seqs[:,1:], nsp_labels=nsp_labels)
if batch_idx %100 == 0:
pred = pred[0].cpu().data.numpy().argmax(axis = 1) # [step]
print(
"Epoch: ",epoch,
"|batch: ", batch_idx,
"| loss: %.3f" % loss,
"\n| tgt: ", " ".join([dataset.i2v[i] for i in seqs[0, 1:].cpu().data.numpy()[:xlen[0].sum()+1]]),
"\n| prd: ", " ".join([dataset.i2v[i] for i in pred[:xlen[0].sum()+1]]),
)
os.makedirs("./visual/models/gpt",exist_ok=True)
torch.save(model.state_dict(),"./visual/models/gpt/model.pth")
export_attention(model,device,dataset)
def export_attention(model,device,data,name="gpt"):
model.load_state_dict(torch.load("./visual/models/gpt/model.pth",map_location=device))
seqs, segs,xlen,nsp_labels = data[:32]
seqs, segs,xlen,nsp_labels = torch.from_numpy(seqs),torch.from_numpy(segs),torch.from_numpy(xlen),torch.from_numpy(nsp_labels)
seqs, segs,nsp_labels = seqs.type(torch.LongTensor).to(device), segs.type(torch.LongTensor).to(device),nsp_labels.to(device)
model(seqs[:,:-1],segs[:,:-1],False)
seqs = seqs.cpu().data.numpy()
data = {"src": [[data.i2v[i] for i in seqs[j]] for j in range(len(seqs))], "attentions": model.attentions}
path = "./visual/tmp/%s_attention_matrix.pkl" % name
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
pickle.dump(data, f)
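# Hedged sketch (not part of the original model above): a standalone reproduction of
# GPT.mask(). Padding columns are always blocked and future positions are blocked
# causally; True marks positions that attention must ignore.
def demo_mask(seqs, padding_idx=0):
    seq_len = seqs.shape[1]
    causal = torch.triu(torch.ones((seq_len, seq_len), dtype=torch.long), diagonal=1)
    pad = torch.eq(seqs, padding_idx)                                   # [n, seq_len]
    mask = torch.where(pad[:, None, None, :], 1, causal[None, None, :, :])
    return mask > 0                                                     # [n, 1, seq_len, seq_len]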
if __name__ == "__main__":
train()
|