the-stack_0_18568 | """
Convert video format x to MP4/H.264.
"""
import os
import sys
import logging
from .videometainfo import VideoMetaInfo
from .utils import sizeof_fmt, time_fmt, find_files, check_dependencies, call, ffmpeg
logger = logging.getLogger(__name__)
class VideoToMP4:
"""To Mp4"""
SUPPORTED_EXTENSIONS = ".wmv, .avi, .mkv, .mov, .flv"
RULES = {
".wmv": "-c:v libx264 -crf 19 ",
".avi":
"-vf yadif=1 -c:v h264_nvenc -preset slow -tune film -crf 17",
".mkv": "-c copy",
".mov": "-vcodec h264 -acodec aac -strict -2 -crf 19 ",
".flv": " -r 20 ",
}
def process(self, video_file: str):
"""Convert video files to MP4 container format."""
name = os.path.splitext(video_file)[0]
ext = os.path.splitext(video_file)[1]
new_name = f"{name}.mp4"
if os.path.exists(new_name):
logger.info(f"Skipping {new_name}: file already exists!")
elif ext not in VideoToMP4.RULES:
logger.error(f"Skipping unsupported type {ext}!")
else:
print(f'Convert {ext} to MP4 {new_name} ... ')
meta_info = VideoMetaInfo(video_file)
rule = VideoToMP4.RULES[ext]
flags = "-movflags +faststart -pix_fmt yuv420p"
ffmpeg(
f'-i "{video_file}" {flags} {rule} -metadata date="{meta_info.original_date}" "{new_name}"'
)
def file(self, filename: str) -> None:
logger.debug(f"converting file {filename}")
self.process(filename)
def directory(self, path: str, extension: str) -> int:
files = find_files(path, extension)
if len(files) < 1:
print("No matching files found in directory!", file=sys.stderr)
else:
for f in files:
self.file(f)
return len(files)
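# Illustrative usage sketch (not part of the original module; paths are made up):
#
#   converter = VideoToMP4()
#   converter.file("/videos/holiday.wmv")      # writes /videos/holiday.mp4
#   converter.directory("/videos", ".avi")     # converts every matching .avi file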
|
the-stack_0_18569 | class MenuItem:
def info(self):
# Output in the format '____: $____'
print(self.name + ': $' + str(self.price))
menu_item1 = MenuItem()
menu_item1.name = 'Sandwich'
menu_item1.price = 5
menu_item1.info()
menu_item2 = MenuItem()
menu_item2.name = 'Chocolate Cake'
menu_item2.price = 4
menu_item2.info()
|
the-stack_0_18570 | import os
import torch
import numpy as np
from tqdm import trange
from PIL import Image
def get_state(gpu):
import torch
midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
if gpu:
midas.cuda()
midas.eval()
midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms")
transform = midas_transforms.default_transform
state = {"model": midas, "transform": transform}
return state
def depth_to_rgba(x):
assert x.dtype == np.float32
assert len(x.shape) == 2
y = x.copy()
y.dtype = np.uint8
y = y.reshape(x.shape + (4,))
return np.ascontiguousarray(y)
def rgba_to_depth(x):
assert x.dtype == np.uint8
assert len(x.shape) == 3 and x.shape[2] == 4
y = x.copy()
y.dtype = np.float32
y = y.reshape(x.shape[:2])
return np.ascontiguousarray(y)
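# Round-trip sketch (illustration only, not in the original script): the two helpers
# above only reinterpret the underlying bytes, so a float32 depth map survives the
# uint8 RGBA view conversion bit-exactly.
#
#   d = np.random.rand(8, 8).astype(np.float32)
#   assert np.array_equal(rgba_to_depth(depth_to_rgba(d)), d)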
def run(x, state):
model = state["model"]
transform = state["transform"]
hw = x.shape[:2]
with torch.no_grad():
prediction = model(transform((x + 1.0) * 127.5).cuda())
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1), size=hw, mode="bicubic", align_corners=False,
).squeeze()
output = prediction.cpu().numpy()
return output
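# Usage sketch for the two helpers above (illustration only; the input is assumed
# to be an HxWx3 array scaled to [-1, 1], which run() rescales to [0, 255] before
# applying the MiDaS transform):
#
#   state = get_state(gpu=True)
#   depth = run(img, state)   # depth: float32 array of shape (H, W)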
def get_filename(relpath, level=-2):
# save class folder structure and filename:
fn = relpath.split(os.sep)[level:]
folder = fn[-2]
file = fn[-1].split(".")[0]
return folder, file
def save_depth(dataset, path, debug=False):
os.makedirs(path)
N = len(dataset)
if debug:
N = 10
state = get_state(gpu=True)
for idx in trange(N, desc="Data"):
ex = dataset[idx]
image, relpath = ex["image"], ex["relpath"]
folder, filename = get_filename(relpath)
# prepare
folderabspath = os.path.join(path, folder)
os.makedirs(folderabspath, exist_ok=True)
savepath = os.path.join(folderabspath, filename)
# run model
xout = run(image, state)
I = depth_to_rgba(xout)
Image.fromarray(I).save("{}.png".format(savepath))
if __name__ == "__main__":
from taming.data.imagenet import ImageNetTrain, ImageNetValidation
out = "data/imagenet_depth"
if not os.path.exists(out):
print(
"Please create a folder or symlink '{}' to extract depth data ".format(out)
+ "(be prepared that the output size will be larger than ImageNet itself)."
)
exit(1)
# go
dset = ImageNetValidation()
abspath = os.path.join(out, "val")
if os.path.exists(abspath):
print("{} exists - not doing anything.".format(abspath))
else:
print("preparing {}".format(abspath))
save_depth(dset, abspath)
print("done with validation split")
dset = ImageNetTrain()
abspath = os.path.join(out, "train")
if os.path.exists(abspath):
print("{} exists - not doing anything.".format(abspath))
else:
print("preparing {}".format(abspath))
save_depth(dset, abspath)
print("done with train split")
print("done done.")
|
the-stack_0_18571 | from vbench.api import Benchmark
from datetime import datetime
common_setup = """from pandas_vb_common import *
import sqlite3
import sqlalchemy
from sqlalchemy import create_engine
engine = create_engine('sqlite:///:memory:')
con = sqlite3.connect(':memory:')
"""
sdate = datetime(2014, 6, 1)
#-------------------------------------------------------------------------------
# to_sql
setup = common_setup + """
index = [rands(10) for _ in xrange(10000)]
df = DataFrame({'float1' : randn(10000),
'float2' : randn(10000),
'string1' : ['foo'] * 10000,
'bool1' : [True] * 10000,
'int1' : np.random.randint(0, 100000, size=10000)},
index=index)
"""
sql_write_sqlalchemy = Benchmark("df.to_sql('test1', engine, if_exists='replace')",
setup, start_date=sdate)
sql_write_fallback = Benchmark("df.to_sql('test1', con, if_exists='replace')",
setup, start_date=sdate)
#-------------------------------------------------------------------------------
# read_sql
setup = common_setup + """
index = [rands(10) for _ in xrange(10000)]
df = DataFrame({'float1' : randn(10000),
'float2' : randn(10000),
'string1' : ['foo'] * 10000,
'bool1' : [True] * 10000,
'int1' : np.random.randint(0, 100000, size=10000)},
index=index)
df.to_sql('test2', engine, if_exists='replace')
df.to_sql('test2', con, if_exists='replace')
"""
sql_read_query_sqlalchemy = Benchmark("read_sql_query('SELECT * FROM test2', engine)",
setup, start_date=sdate)
sql_read_query_fallback = Benchmark("read_sql_query('SELECT * FROM test2', con)",
setup, start_date=sdate)
sql_read_table_sqlalchemy = Benchmark("read_sql_table('test2', engine)",
setup, start_date=sdate)
#-------------------------------------------------------------------------------
# type specific write
setup = common_setup + """
df = DataFrame({'float' : randn(10000),
'string' : ['foo'] * 10000,
'bool' : [True] * 10000,
'datetime' : date_range('2000-01-01', periods=10000, freq='s')})
df.loc[1000:3000, 'float'] = np.nan
"""
sql_float_write_sqlalchemy = \
Benchmark("df[['float']].to_sql('test_float', engine, if_exists='replace')",
setup, start_date=sdate)
sql_float_write_fallback = \
Benchmark("df[['float']].to_sql('test_float', con, if_exists='replace')",
setup, start_date=sdate)
sql_string_write_sqlalchemy = \
Benchmark("df[['string']].to_sql('test_string', engine, if_exists='replace')",
setup, start_date=sdate)
sql_string_write_fallback = \
Benchmark("df[['string']].to_sql('test_string', con, if_exists='replace')",
setup, start_date=sdate)
sql_datetime_write_sqlalchemy = \
Benchmark("df[['datetime']].to_sql('test_datetime', engine, if_exists='replace')",
setup, start_date=sdate)
#sql_datetime_write_fallback = \
# Benchmark("df[['datetime']].to_sql('test_datetime', con, if_exists='replace')",
# setup3, start_date=sdate)
#-------------------------------------------------------------------------------
# type specific read
setup = common_setup + """
df = DataFrame({'float' : randn(10000),
'datetime' : date_range('2000-01-01', periods=10000, freq='s')})
df['datetime_string'] = df['datetime'].map(str)
df.to_sql('test_type', engine, if_exists='replace')
df[['float', 'datetime_string']].to_sql('test_type', con, if_exists='replace')
"""
sql_float_read_query_sqlalchemy = \
Benchmark("read_sql_query('SELECT float FROM test_type', engine)",
setup, start_date=sdate)
sql_float_read_table_sqlalchemy = \
Benchmark("read_sql_table('test_type', engine, columns=['float'])",
setup, start_date=sdate)
sql_float_read_query_fallback = \
Benchmark("read_sql_query('SELECT float FROM test_type', con)",
setup, start_date=sdate)
sql_datetime_read_as_native_sqlalchemy = \
Benchmark("read_sql_table('test_type', engine, columns=['datetime'])",
setup, start_date=sdate)
sql_datetime_read_and_parse_sqlalchemy = \
Benchmark("read_sql_table('test_type', engine, columns=['datetime_string'], parse_dates=['datetime_string'])",
setup, start_date=sdate)
|
the-stack_0_18573 | import numpy as __np__
from numpy import cos as __cos__
from numpy import sin as __sin__
from numpy import arctan2 as __arctan2__
from numpy import sqrt as __sqrt__
import matplotlib.pyplot as __plt__
from matplotlib import cm as __cm__
from matplotlib.ticker import LinearLocator as __LinearLocator__
from matplotlib.ticker import FormatStrFormatter as __FormatStrFormatter__
import tools as __tools__
class Coefficient(object):
"""
Return a set of Seidel wavefront aberration coefficients
"""
__coefficients__ = []
__seidellist___=["Ap Piston",
"At Tilt",
"Ad Defocus",
"Aa Astigmatism",
"Ac Coma",
"As Spherical"]
def __init__(self,Ap=0,Bp=0,At=0,Bt=0,Ad=0,Bd=0,Aa=0,Ba=0,Ac=0,Bc=0,As=0,Bs=0):
if type(Ap) == list:
self.__coefficients__ = Ap
else:
self.__coefficients__ = [[Ap,Bp],[At,Bt],[Ad,Bd],[Aa,Ba],[Ac,Bc],[As,Bs]]
def outputcoefficient(self):
return self.__coefficients__
def seidelsurface(self, label = True, zlim=[], matrix = False):
r1 = __np__.linspace(0, 1, 100)
u1 = __np__.linspace(0, 2*__np__.pi, 100)
[u,r] = __np__.meshgrid(u1,r1)
X = r*__cos__(u)
Y = r*__sin__(u)
W = __seidelpolar__(self.__coefficients__,r,u)
fig = __plt__.figure(figsize=(12, 8), dpi=80)
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, W, rstride=1, cstride=1, cmap=__cm__.RdYlGn,
linewidth=0, antialiased=False, alpha = 0.6)
fig.colorbar(surf, shrink=1, aspect=30)
__plt__.show()
def twyman_green(self, lambda_1 = 632, PR = 1):
lambda_1 = lambda_1*(10**-9)
A = self.__coefficients__
r = __np__.linspace(-PR, PR, 400)
x, y = __np__.meshgrid(r,r)
OPD = __seidelcartesian__(A,x,y)*2/PR
ph = 2 * __np__.pi * OPD
I1 = 1
I2 = 1
Ixy = I1 + I2 + 2 * __np__.sqrt(I1*I2) * __np__.cos(ph)
__tools__.makecircle(Ixy, r, PR)
fig = __plt__.figure(figsize=(9, 6), dpi=80)
__plt__.imshow(-Ixy, extent=[-PR,PR,-PR,PR])
__plt__.set_cmap('Greys')
__plt__.show()
def __seidelpolar__(coefficient,r,u):
W = coefficient
h = 1
Ap = W[0][0] * h**2
At = W[1][0] * h*r*__cos__(u-W[1][1]*__np__.pi/180)
Ad = W[2][0] * r**2
Aa = W[3][0] * h**2*r**2*__cos__(u-W[3][1]*__np__.pi/180)
Ac = W[4][0] * h*r*__cos__(u-W[4][1]*__np__.pi/180)
As = W[5][0] * r**4
W = Ap+At+Ad+Aa+Ac+As
return W
def __seidelcartesian__(coefficient,x,y):
W = coefficient
h = 1
u = __arctan2__(y,x)
r = __sqrt__(x**2+y**2)
W = __seidelpolar__(coefficient,r,u)
return W
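# Illustrative usage sketch (not part of the original module): build a wavefront
# with some defocus and coma, then render the Seidel surface and the simulated
# Twyman-Green interferogram.
#
#   c = Coefficient(Ad=0.5, Ac=0.3, Bc=45)
#   c.seidelsurface()
#   c.twyman_green()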
|
the-stack_0_18575 | from pathlib import Path
import os
import copy
import sys
import pytest
from ploomber.util.util import add_to_sys_path, chdir_code, requires
from ploomber.util import dotted_path
def test_add_to_sys_path():
path = str(Path('/path/to/add').resolve())
with add_to_sys_path(path, chdir=False):
assert path in sys.path
assert path not in sys.path
def test_add_to_sys_path_with_chdir(tmp_directory):
path = Path('.').resolve() / 'some_directory'
path.mkdir()
path = str(path)
old_dir = os.getcwd()
with add_to_sys_path(path, chdir=True):
assert path in sys.path
assert path == os.getcwd()
assert path not in sys.path
assert old_dir == os.getcwd()
def test_add_to_sys_path_with_none():
original = copy.copy(sys.path)
with add_to_sys_path(None, chdir=False):
assert sys.path == original
assert sys.path == original
def test_add_to_sys_path_with_exception():
path = str(Path('/path/to/add').resolve())
with pytest.raises(Exception):
with add_to_sys_path(path, chdir=False):
assert path in sys.path
raise Exception
assert path not in sys.path
def test_load_dotted_path_custom_error_message():
with pytest.raises(AttributeError) as excinfo:
dotted_path.load_dotted_path('test_pkg.not_a_function')
assert ('Could not get "not_a_function" from module "test_pkg"'
in str(excinfo.value))
def test_load_dotted_path_with_reload(tmp_directory, add_current_to_sys_path):
# write a sample module
Path('dotted_path_with_reload.py').write_text("""
def x():
pass
""")
# load the module
dotted_path.load_dotted_path('dotted_path_with_reload.x')
# add a new function
Path('dotted_path_with_reload.py').write_text("""
def x():
pass
def y():
pass
""")
# the new function should be importable since we are using reload=True
assert dotted_path.load_dotted_path('dotted_path_with_reload.y',
reload=True)
def test_chdir_code(tmp_directory):
# test generated code is valid
eval(chdir_code(tmp_directory))
@pytest.mark.parametrize(
'params, expected',
[
[
dict(pkgs=['p1']),
'p1 is required to use fn. Install it by running "pip install p1"'
],
[
dict(pkgs=['p1'], name='name'),
('p1 is required to use name. Install it by running '
'"pip install p1"')
],
[
dict(pkgs=['p1'], extra_msg='extra'),
('p1 is required to use fn. Install it by running '
'"pip install p1". extra')
],
[
dict(pkgs=['p1', 'p2']),
('p1 p2 are required to use fn. Install them by running '
'"pip install p1 p2"')
],
[
dict(pkgs=['p1'], pip_names=['n1']),
'n1 is required to use fn. Install it by running "pip install n1"'
],
[
# the first package is installed, it shouldn't appear in the error
dict(pkgs=['ploomber', 'p1']),
'p1 is required to use fn. Install it by running "pip install p1"'
],
])
def test_requires(params, expected):
@requires(**params)
def fn():
pass
with pytest.raises(ImportError) as excinfo:
fn()
assert str(excinfo.value) == expected
|
the-stack_0_18576 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import nn
from torch.nn import functional as F
import argparse
import sys
sys.path.append('./')
import models
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, prepare_input_sequence
from common.utils import to_gpu, get_mask_from_lengths
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='Full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported Tacotron 2 ONNX model')
parser.add_argument('--fp16', action='store_true',
help='Export with half precision to ONNX')
return parser
def encoder_infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, False)
x = x.transpose(1, 2)
input_lengths_cpu = input_lengths[:]
input_lengths_cpu = input_lengths_cpu.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths_cpu, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
lens = input_lengths*2
return outputs, lens
class Encoder(torch.nn.Module):
def __init__(self, tacotron2):
super(Encoder, self).__init__()
self.tacotron2 = tacotron2
self.tacotron2.encoder.lstm.flatten_parameters()
self.infer = encoder_infer
def forward(self, sequence, sequence_lengths):
embedded_inputs = self.tacotron2.embedding(sequence).transpose(1, 2)
memory, lens = self.infer(self.tacotron2.encoder, embedded_inputs, sequence_lengths)
processed_memory = self.tacotron2.decoder.attention_layer.memory_layer(memory)
return memory, processed_memory, lens
class Postnet(torch.nn.Module):
def __init__(self, tacotron2):
super(Postnet, self).__init__()
self.tacotron2 = tacotron2
def forward(self, mel_outputs):
mel_outputs_postnet = self.tacotron2.postnet(mel_outputs)
return mel_outputs + mel_outputs_postnet
def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
lstm_mod.weight_ih_l0 = torch.nn.Parameter(lstmcell_mod.weight_ih)
lstm_mod.weight_hh_l0 = torch.nn.Parameter(lstmcell_mod.weight_hh)
lstm_mod.bias_ih_l0 = torch.nn.Parameter(lstmcell_mod.bias_ih)
lstm_mod.bias_hh_l0 = torch.nn.Parameter(lstmcell_mod.bias_hh)
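# Sketch of what the copy above enables (illustration only): a trained LSTMCell's
# parameters are reused in a single-layer nn.LSTM, which is exportable to ONNX.
#
#   cell = nn.LSTMCell(10, 20)
#   lstm = nn.LSTM(10, 20, 1)
#   lstmcell2lstm_params(lstm, cell)   # lstm now computes the same step function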
def prenet_infer(self, x):
x1 = x[:]
for linear in self.layers:
x1 = F.relu(linear(x1))
x0 = x1[0].unsqueeze(0)
mask = torch.le(torch.rand(256, device='cuda').to(x.dtype), 0.5).to(x.dtype)
mask = mask.expand(x1.size(0), x1.size(1))
x1 = x1*mask*2.0
return x1
class DecoderIter(torch.nn.Module):
def __init__(self, tacotron2):
super(DecoderIter, self).__init__()
self.tacotron2 = tacotron2
dec = tacotron2.decoder
self.p_attention_dropout = dec.p_attention_dropout
self.p_decoder_dropout = dec.p_decoder_dropout
self.prenet = dec.prenet
self.prenet.infer = prenet_infer
self.attention_rnn = nn.LSTM(dec.prenet_dim + dec.encoder_embedding_dim,
dec.attention_rnn_dim, 1)
lstmcell2lstm_params(self.attention_rnn, dec.attention_rnn)
self.attention_rnn.flatten_parameters()
self.attention_layer = dec.attention_layer
self.decoder_rnn = nn.LSTM(dec.attention_rnn_dim + dec.encoder_embedding_dim,
dec.decoder_rnn_dim, 1)
lstmcell2lstm_params(self.decoder_rnn, dec.decoder_rnn)
self.decoder_rnn.flatten_parameters()
self.linear_projection = dec.linear_projection
self.gate_layer = dec.gate_layer
def decode(self, decoder_input, in_attention_hidden, in_attention_cell,
in_decoder_hidden, in_decoder_cell, in_attention_weights,
in_attention_weights_cum, in_attention_context, memory,
processed_memory, mask):
cell_input = torch.cat((decoder_input, in_attention_context), -1)
_, (out_attention_hidden, out_attention_cell) = self.attention_rnn(
cell_input.unsqueeze(0), (in_attention_hidden.unsqueeze(0),
in_attention_cell.unsqueeze(0)))
out_attention_hidden = out_attention_hidden.squeeze(0)
out_attention_cell = out_attention_cell.squeeze(0)
out_attention_hidden = F.dropout(
out_attention_hidden, self.p_attention_dropout, False)
attention_weights_cat = torch.cat(
(in_attention_weights.unsqueeze(1),
in_attention_weights_cum.unsqueeze(1)), dim=1)
out_attention_context, out_attention_weights = self.attention_layer(
out_attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
out_attention_weights_cum = in_attention_weights_cum + out_attention_weights
decoder_input_tmp = torch.cat(
(out_attention_hidden, out_attention_context), -1)
_, (out_decoder_hidden, out_decoder_cell) = self.decoder_rnn(
decoder_input_tmp.unsqueeze(0), (in_decoder_hidden.unsqueeze(0),
in_decoder_cell.unsqueeze(0)))
out_decoder_hidden = out_decoder_hidden.squeeze(0)
out_decoder_cell = out_decoder_cell.squeeze(0)
out_decoder_hidden = F.dropout(
out_decoder_hidden, self.p_decoder_dropout, False)
decoder_hidden_attention_context = torch.cat(
(out_decoder_hidden, out_attention_context), 1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, out_attention_hidden,
out_attention_cell, out_decoder_hidden, out_decoder_cell,
out_attention_weights, out_attention_weights_cum, out_attention_context)
# @torch.jit.script
def forward(self,
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask):
decoder_input1 = self.prenet.infer(self.prenet, decoder_input)
outputs = self.decode(decoder_input1,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
return outputs
def test_inference(encoder, decoder_iter, postnet):
encoder.eval()
decoder_iter.eval()
postnet.eval()
from trt.inference_trt import init_decoder_inputs
texts = ["Hello World, good day."]
sequences, sequence_lengths = prepare_input_sequence(texts)
measurements = {}
print("Running Tacotron2 Encoder")
with torch.no_grad():
memory, processed_memory, lens = encoder(sequences, sequence_lengths)
print("Running Tacotron2 Decoder")
device = memory.device
dtype = memory.dtype
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1), torch.zeros(1), torch.zeros(1))
gate_threshold = 0.6
max_decoder_steps = 1000
first_iter = True
(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory,
mask) = init_decoder_inputs(memory, processed_memory, sequence_lengths)
while True:
with torch.no_grad():
(mel_output, gate_output,
attention_hidden, attention_cell,
decoder_hidden, decoder_cell,
attention_weights, attention_weights_cum,
attention_context) = decoder_iter(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
if first_iter:
mel_outputs = torch.unsqueeze(mel_output, 2)
gate_outputs = torch.unsqueeze(gate_output, 2)
alignments = torch.unsqueeze(attention_weights, 2)
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(mel_output, 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(gate_output, 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(attention_weights, 2)), 2)
dec = torch.le(torch.sigmoid(gate_output), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after ",mel_outputs.size(2)," decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
print("Running Tacotron2 PostNet")
with torch.no_grad():
mel_outputs_postnet = postnet(mel_outputs)
return mel_outputs_postnet
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 export to TRT')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False)
opset_version = 10
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
sequence_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()
dummy_input = (sequences, sequence_lengths)
encoder = Encoder(tacotron2)
encoder.eval()
with torch.no_grad():
encoder(*dummy_input)
torch.onnx.export(encoder, dummy_input, args.output+"/"+"encoder.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["sequences", "sequence_lengths"],
output_names=["memory", "processed_memory", "lens"],
dynamic_axes={"sequences": {0: "batch_size", 1: "text_seq"},
"memory": {0: "batch_size", 1: "mem_seq"},
"processed_memory": {0: "batch_size", 1: "mem_seq"}
})
decoder_iter = DecoderIter(tacotron2)
memory = torch.randn((1,sequence_lengths[0],512)).cuda() #encoder_outputs
if args.fp16:
memory = memory.half()
memory_lengths = sequence_lengths
# initialize decoder states for dummy_input
decoder_input = tacotron2.decoder.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = tacotron2.decoder.initialize_decoder_states(memory)
dummy_input = (decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
decoder_iter = DecoderIter(tacotron2)
decoder_iter.eval()
with torch.no_grad():
decoder_iter(*dummy_input)
torch.onnx.export(decoder_iter, dummy_input, args.output+"/"+"decoder_iter.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["decoder_input",
"attention_hidden",
"attention_cell",
"decoder_hidden",
"decoder_cell",
"attention_weights",
"attention_weights_cum",
"attention_context",
"memory",
"processed_memory",
"mask"],
output_names=["decoder_output",
"gate_prediction",
"out_attention_hidden",
"out_attention_cell",
"out_decoder_hidden",
"out_decoder_cell",
"out_attention_weights",
"out_attention_weights_cum",
"out_attention_context"],
dynamic_axes={"decoder_input" : {0: "batch_size"},
"attention_hidden" : {0: "batch_size"},
"attention_cell" : {0: "batch_size"},
"decoder_hidden" : {0: "batch_size"},
"decoder_cell" : {0: "batch_size"},
"attention_weights" : {0: "batch_size", 1: "seq_len"},
"attention_weights_cum" : {0: "batch_size", 1: "seq_len"},
"attention_context" : {0: "batch_size"},
"memory" : {0: "batch_size", 1: "seq_len"},
"processed_memory" : {0: "batch_size", 1: "seq_len"},
"mask" : {0: "batch_size", 1: "seq_len"},
"decoder_output" : {0: "batch_size"},
"gate_prediction" : {0: "batch_size"},
"out_attention_hidden" : {0: "batch_size"},
"out_attention_cell" : {0: "batch_size"},
"out_decoder_hidden" : {0: "batch_size"},
"out_decoder_cell" : {0: "batch_size"},
"out_attention_weights" : {0: "batch_size", 1: "seq_len"},
"out_attention_weights_cum" : {0: "batch_size", 1: "seq_len"},
"out_attention_context" : {0: "batch_size"}
})
postnet = Postnet(tacotron2)
dummy_input = torch.randn((1,80,620)).cuda()
if args.fp16:
dummy_input = dummy_input.half()
torch.onnx.export(postnet, dummy_input, args.output+"/"+"postnet.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel_outputs"],
output_names=["mel_outputs_postnet"],
dynamic_axes={"mel_outputs": {0: "batch_size", 2: "mel_seq"},
"mel_outputs_postnet": {0: "batch_size", 2: "mel_seq"}})
mel = test_inference(encoder, decoder_iter, postnet)
torch.save(mel, "mel.pt")
if __name__ == '__main__':
main()
|
the-stack_0_18577 | #!/usr/bin/python
#from udacityplots import *
import warnings
warnings.filterwarnings("ignore")
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
#import numpy as np
#import matplotlib.pyplot as plt
#plt.ioff()
def prettyPicture(clf, X_test, y_test):
x_min = 0.0; x_max = 1.0
y_min = 0.0; y_max = 1.0
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
h = .01 # step size in the mesh
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.pcolormesh(xx, yy, Z, cmap=pl.cm.seismic)
# Plot also the test points
grade_sig = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==0]
bumpy_sig = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==0]
grade_bkg = [X_test[ii][0] for ii in range(0, len(X_test)) if y_test[ii]==1]
bumpy_bkg = [X_test[ii][1] for ii in range(0, len(X_test)) if y_test[ii]==1]
plt.scatter(grade_sig, bumpy_sig, color = "b", label="fast")
plt.scatter(grade_bkg, bumpy_bkg, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.savefig("test.png")
import base64
import json
import subprocess
def output_image(name, format, bytes):
image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
data = {}
data['name'] = name
data['format'] = format
data['bytes'] = base64.encodebytes(bytes)
print (image_start+json.dumps(data)+image_end)
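# Typical call sketch (an assumption about how the grader consumes these markers,
# not part of this file): read back the figure saved by prettyPicture() and emit
# it between the BEGIN/END image markers.
#
#   with open("test.png", "rb") as f:
#       output_image("test.png", "png", f.read())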
|
the-stack_0_18579 | import os, sys
import cv2
import numpy as np
import pandas as pd
import string
import mediapipe as mp
import pickle
from zipfile import ZipFile
from utils import StaticSignProcessor, mp_process_image, generate_dataframe, annotate_image, pred_class_to_letter
# load the model
with open('saved_model.pkl', 'rb') as f:
model = pickle.load(f)
# TODO change to generic video src handler
class VideoHandler():
def __init__(self, vid_src=0):
self.cap = cv2.VideoCapture(vid_src)
self.processor = StaticSignProcessor((126,))
self.timestamps = []
self.hand_results = []
self.pose_results = []
self.framecount = 0
self.prediction = 'Neutral'
self.score = ''
self.pred_thresh = 0.7
# self.colormap = {'No hands detected': (0,0,255), 'Neutral': (255,0,0)}
def load_source(self, vid_src):
self.cap.release()
self.cap = cv2.VideoCapture(vid_src)
def generate_buffer(self, frame, buffer_size=10, sliding_window=1, callback=None):
'''
Generates a buffer of fixed length from a live video stream
to be processed and passed into the recognition model.
Returns:
A dict containing timestamps, hand_results, and pose_results
if the buffer condition is met
'''
assert buffer_size > 0, 'Buffer size must be a positive number'
assert sliding_window > 0, 'Sliding window size must be a positive number'
assert buffer_size > sliding_window, 'Sliding window must be smaller than buffer'
hand_result, pose_result = mp_process_image(frame)
if not hand_result.multi_handedness:
self.timestamps = []
self.hand_results = []
self.pose_results = []
self.framecount = 0
return
# time is a construct
self.timestamps.append(0.0)
self.hand_results.append(hand_result)
self.pose_results.append(pose_result)
self.framecount += 1
if (self.framecount % buffer_size == 0) or \
(self.framecount % sliding_window == 0 and self.framecount > buffer_size):
buf = {'timestamps': self.timestamps,
'hand_results': self.hand_results,
'pose_results': self.pose_results}
self.timestamps = self.timestamps[sliding_window:]
self.hand_results = self.hand_results[sliding_window:]
self.pose_results = self.pose_results[sliding_window:]
if callback:
callback(buf)
return buf
def get_next_frame(self):
'''
Reads the next frame from the webcam and makes a prediction when applicable.
Returns:
- None if webcam feed is closed or can't read feed
- annotated image if feed is open
- annotated image, prediction, score if feed is open and buffer condition is met
'''
if not self.cap.isOpened():
return
success, image = self.cap.read()
if not success:
return
buf = self.generate_buffer(image, buffer_size=10, sliding_window=1, callback=self.predict)
# if blur:
# image = cv2.blur(image, (25,25))
if self.hand_results:
image = annotate_image(image, self.hand_results[-1], self.pose_results[-1])
else:
self.prediction = 'No hands detected'
self.score = ''
image = cv2.flip(image, 1)
if self.prediction:
if self.prediction == 'No hands detected':
color = (0,0,255)
elif self.prediction == 'Neutral':
color = (255,0,0)
else:
color = (0,150,0)
cv2.putText(image, self.prediction + ' ' + self.score, (50,80), cv2.FONT_HERSHEY_SIMPLEX, 2, color, 4)
return image, self.prediction, self.score
def predict(self, buf):
# Make a prediction on the generated buffer
df = generate_dataframe(buf)
data = self.processor.process(df)
pred_prob = model.predict_proba([data])[0]
pred_class = list(pred_prob).index(max(pred_prob))
if max(pred_prob) < self.pred_thresh:
self.prediction = 'Neutral'
self.score = ''
else:
self.prediction = pred_class_to_letter(pred_class)[0]
self.score = str(round(max(pred_prob),2))
def get_frame(self):
if self.cap.isOpened():
success, frame = self.cap.read()
return frame
def stream_webcam(self):
'''
A helper function to demonstrate the VideoHandler's functionality.
Note that this is a blocking function: it will keep running until the webcam feed is closed.
'''
while self.cap.isOpened():
image,_,_ = self.get_next_frame()
cv2.imshow('webcam', image)
if cv2.waitKey(5) & 0xFF == 27:
print('esc')
break
# out = self.get_next_frame()
# while out:
# image,_,_ = out
# cv2.imshow('webcam', image)
# out = self.get_next_frame()
# if cv2.waitKey(5) & 0xFF == 27:
# print('esc')
# break
def evaluate_model(self, show=False):
'''
A helper function for evaluating the recognition model's performance.
It uses pre-recorded videos in test_webcam_data to test each letter.
The videos in the test data were not used to train the model.
'''
if not os.path.isdir('test_webcam_data'):
print('Unzipping test data...')
with ZipFile('test_webcam_data.zip','r') as zipobj:
zipobj.extractall()
accuracy = 0
for i in string.ascii_uppercase:
print('input:', i)
tmp = []
vid_src = f"test_webcam_data/{i}.mp4"
self.cap = cv2.VideoCapture(vid_src)
while self.cap.isOpened():
try:
image, pred, score = self.get_next_frame()
if pred not in ('Neutral','No hands detected'):
tmp.append(pred.replace('LETTER-',''))
if show:
cv2.imshow('webcam', image)
if cv2.waitKey(5) & 0xFF == 27:
print('esc')
break
except:
break
final_pred = max(set(tmp), key = tmp.count)
print('prediction:', final_pred)
if i == final_pred:
print('CORRECT')
accuracy += 1
else:
print('INCORRECT')
print('\n\nFinal Accuracy: {}/26 ({}%)'.format(str(accuracy), round(accuracy/26*100, 2)))
if __name__ == "__main__":
webcam = VideoHandler()
# webcam.stream_webcam()
webcam.evaluate_model(show=(len(sys.argv) > 1 and sys.argv[1] == '--show'))
|
the-stack_0_18580 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import mock
from mock import patch
import pytest
from bokeh.core.validation import check_integrity
from bokeh.plotting import figure
from bokeh.models import GlyphRenderer, Label, Plot, LinearAxis
from bokeh.models.ranges import FactorRange, DataRange1d, Range1d
from bokeh.models.scales import CategoricalScale, LinearScale, LogScale
from bokeh.models.tools import PanTool
import bokeh.models.plots as bmp
_LEGEND_EMPTY_WARNING = """
You are attemptings to set `plot.legend.location` on a plot that has zero legends added, this will have no effect.
Before legend properties can be set, you must add a Legend explicitly, or call a glyph method with the 'legend' parameter set.
"""
class TestPlotLegendProperty(object):
def test_basic(self):
plot = figure(tools='')
x = plot.legend
assert isinstance(x, bmp._list_attr_splat)
assert len(x) == 0
plot.circle([1,2], [3,4], legend="foo")
x = plot.legend
assert isinstance(x, bmp._list_attr_splat)
assert len(x) == 1
def test_warning(self):
plot = figure(tools='')
with pytest.warns(UserWarning) as warns:
plot.legend.location = "above"
assert len(warns) == 1
assert warns[0].message.args[0] == _LEGEND_EMPTY_WARNING
class TestPlotSelect(object):
def setup_method(self):
self._plot = figure(tools='pan')
self._plot.circle([1,2,3], [3,2,1], name='foo')
@patch('bokeh.models.plots.find')
def test_string_arg(self, mock_find):
self._plot.select('foo')
assert mock_find.called
assert mock_find.call_args[0][1] == dict(name='foo')
@patch('bokeh.models.plots.find')
def test_type_arg(self, mock_find):
self._plot.select(PanTool)
assert mock_find.called
assert mock_find.call_args[0][1] == dict(type=PanTool)
@patch('bokeh.models.plots.find')
def test_kwargs(self, mock_find):
kw = dict(name='foo', type=GlyphRenderer)
self._plot.select(**kw)
assert mock_find.called
assert mock_find.call_args[0][1] == kw
@patch('bokeh.models.plots.find')
def test_single_selector_kwarg(self, mock_find):
kw = dict(name='foo', type=GlyphRenderer)
self._plot.select(selector=kw)
assert mock_find.called
assert mock_find.call_args[0][1] == kw
def test_selector_kwarg_and_extra_kwargs(self):
with pytest.raises(TypeError) as exc:
self._plot.select(selector=dict(foo='foo'), bar='bar')
assert "when passing 'selector' keyword arg, not other keyword args may be present" == str(exc.value)
def test_bad_arg_type(self):
with pytest.raises(TypeError) as exc:
self._plot.select(10)
assert "selector must be a dictionary, string or plot object." == str(exc.value)
def test_too_many_args(self):
with pytest.raises(TypeError) as exc:
self._plot.select('foo', 'bar')
assert 'select accepts at most ONE positional argument.' == str(exc.value)
def test_no_input(self):
with pytest.raises(TypeError) as exc:
self._plot.select()
assert 'select requires EITHER a positional argument, OR keyword arguments.' == str(exc.value)
def test_arg_and_kwarg(self):
with pytest.raises(TypeError) as exc:
self._plot.select('foo', type=PanTool)
assert 'select accepts EITHER a positional argument, OR keyword arguments (not both).' == str(exc.value)
class TestPlotValidation(object):
def test_missing_renderers(self):
p = figure()
p.renderers = []
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.warning.call_count == 1
assert mock_logger.warning.call_args[0][0].startswith("W-1000 (MISSING_RENDERERS): Plot has no renderers")
def test_missing_scale(self):
p = figure()
p.x_scale = None
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1008 (REQUIRED_SCALE): A required Scale object is missing: x_scale")
p.y_scale = None
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1008 (REQUIRED_SCALE): A required Scale object is missing: x_scale, y_scale")
def test_missing_range(self):
p = figure()
p.x_range = None
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1004 (REQUIRED_RANGE): A required Range object is missing: x_range")
p.y_range = None
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith("E-1004 (REQUIRED_RANGE): A required Range object is missing: x_range, y_range")
def test_bad_extra_range_name(self):
p = figure()
p.xaxis.x_range_name="junk"
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith(
"E-1020 (BAD_EXTRA_RANGE_NAME): An extra range name is configued with a name that does not correspond to any range: x_range_name='junk' [LinearAxis"
)
p = figure()
p.extra_x_ranges['foo'] = Range1d()
p.grid.x_range_name="junk"
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([p])
assert mock_logger.error.call_count == 1
assert mock_logger.error.call_args[0][0].startswith(
"E-1020 (BAD_EXTRA_RANGE_NAME): An extra range name is configued with a name that does not correspond to any range: x_range_name='junk' [Grid"
)
assert mock_logger.error.call_args[0][0].count("Grid") == 2
def test_plot_add_layout_raises_error_if_not_render():
plot = figure()
with pytest.raises(ValueError):
plot.add_layout(Range1d())
def test_plot_add_layout_raises_error_if_plot_already_on_annotation():
plot = figure()
with pytest.raises(ValueError):
plot.add_layout(Label(plot=plot))
def test_plot_add_layout_adds_label_to_plot_renderers():
plot = figure()
label = Label()
plot.add_layout(label)
assert label in plot.renderers
def test_plot_add_layout_adds_axis_to_renderers_and_side_renderers():
plot = figure()
axis = LinearAxis()
plot.add_layout(axis, 'left')
assert axis in plot.renderers
assert axis in plot.left
def test_sizing_mode_property_is_fixed_by_default():
plot = figure()
assert plot.sizing_mode == 'fixed'
class BaseTwinAxis(object):
"""Base class for testing extra ranges"""
def verify_axis(self, axis_name):
plot = Plot()
range_obj = getattr(plot, 'extra_{}_ranges'.format(axis_name))
range_obj['foo_range'] = self.get_range_instance()
assert range_obj['foo_range']
def test_x_range(self):
self.verify_axis('x')
def test_y_range(self):
self.verify_axis('y')
@staticmethod
def get_range_instance():
raise NotImplementedError
class TestCategoricalTwinAxis(BaseTwinAxis, object):
"""Test whether extra x and y ranges can be categorical"""
@staticmethod
def get_range_instance():
return FactorRange('foo', 'bar')
class TestLinearTwinAxis(BaseTwinAxis, object):
"""Test whether extra x and y ranges can be Range1d"""
@staticmethod
def get_range_instance():
return Range1d(0, 42)
def test_plot_with_no_title_specified_creates_an_empty_title():
plot = Plot()
assert plot.title.text == ""
def test_plot__scale_classmethod():
assert isinstance(Plot._scale("auto"), LinearScale)
assert isinstance(Plot._scale("linear"), LinearScale)
assert isinstance(Plot._scale("log"), LogScale)
assert isinstance(Plot._scale("categorical"), CategoricalScale)
with pytest.raises(ValueError):
Plot._scale("malformed_type")
def test__check_required_scale_has_scales():
plot = Plot()
check = plot._check_required_scale()
assert check == []
def test__check_required_scale_missing_scales():
plot = Plot(x_scale=None, y_scale=None)
check = plot._check_required_scale()
assert check != []
def test__check_compatible_scale_and_ranges_compat_numeric():
plot = Plot(x_scale=LinearScale(), x_range=Range1d())
check = plot._check_compatible_scale_and_ranges()
assert check == []
plot = Plot(y_scale=LogScale(), y_range=DataRange1d())
check = plot._check_compatible_scale_and_ranges()
assert check == []
def test__check_compatible_scale_and_ranges_compat_factor():
plot = Plot(x_scale=CategoricalScale(), x_range=FactorRange())
check = plot._check_compatible_scale_and_ranges()
assert check == []
def test__check_compatible_scale_and_ranges_incompat_numeric_scale_and_factor_range():
plot = Plot(x_scale=LinearScale(), x_range=FactorRange())
check = plot._check_compatible_scale_and_ranges()
assert check != []
def test__check_compatible_scale_and_ranges_incompat_factor_scale_and_numeric_range():
plot = Plot(x_scale=CategoricalScale(), x_range=DataRange1d())
check = plot._check_compatible_scale_and_ranges()
assert check != []
|
the-stack_0_18581 | import sublime
import sublime_plugin
import sys
ST2 = sys.version_info < (3, 3)
class TransliterateCommand(sublime_plugin.TextCommand):
def run(self, edit, dictionary_file):
s = sublime.load_settings(dictionary_file)
dictionary = s.get('chars_mapping')
# invert dict
# reason: it was a problem loading dict with unicode keys in sublime
dictionary = dict([[v, k] for k, v in dictionary.items()])
selections = self.view.sel()
for sel in selections:
selection_text = self.view.substr(sel)
self.view.replace(edit, sel, translit(selection_text, dictionary))
def translit(input_string, dictionary):
translit_string = []
for char in input_string:
translit_string.append(dictionary.get(char, char))
return ''.join(translit_string)
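# Example sketch (illustration only): the mapping is applied one character at a
# time and unknown characters pass through unchanged, e.g.
#
#   translit("privet", {'p': 'п', 'r': 'р', 'i': 'и', 'v': 'в', 'e': 'е', 't': 'т'})
#   # -> 'привет'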
|
the-stack_0_18584 | from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib as matplotlib
from matplotlib import patheffects
import numpy as np
import math as math
import random as rand
import os
import csv
rcParams.update({'figure.autolayout': True})
#matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)
#rcParams['path.effects'] = [patheffects.withStroke(linewidth=.5)]
c = ['#aa3863', '#3b7d86', '#5443a3']
times = []
V1_plot1, V2_plot2, V2_plot3 = [], [], []
Vth = 1
Vr = 0
fig, ax = plt.subplots(1, 3, figsize=(12,3.5))
k = 0
with open('spikelet_beta=0.1.dat', newline='') as file:
datareader = csv.reader(file, delimiter=' ')
for row in datareader:
if float(row[0]) >= 0 and float(row[0]) <= 20 :
times.append(float(row[0]))
V1_plot1.append(float(row[1]))
V2_plot2.append(float(row[2]))
with open('spikelet_beta=0.2.dat', newline='') as file:
datareader = csv.reader(file, delimiter=' ')
for row in datareader:
if float(row[0]) >= 0 and float(row[0]) <= 20 :
V2_plot3.append(float(row[2]))
# A spike occurs iff there was a reset
for i in range(1,len(V1_plot1)) :
if abs(V1_plot1[i]-V1_plot1[i-1]) > (Vth-Vr)/2 and V1_plot1[i] <= 1 and V1_plot1[i-1] <= 1:
V1_plot1.insert(i, Vth+0.5)
V2_plot2.insert(i, V2_plot2[i])
V2_plot3.insert(i, V2_plot3[i])
times.insert(i, times[i])
ax[0].plot(times, V1_plot1, alpha=1, color=c[0], linestyle='-', label='$V_1$') #alpha=0.75
ax[1].plot(times, V2_plot2, alpha=1, color=c[1], linestyle='-', label='$V_2$') #alpha=0.75
ax[2].plot(times, V2_plot3, alpha=1, color=c[1], linestyle='-', label='$V_2$') #alpha=0.75
ax[0].set_xlabel('Time ($10^{-2}$ seconds)', size=12)
ax[1].set_xlabel('Time ($10^{-2}$ seconds)', size=12)
ax[2].set_xlabel('Time ($10^{-2}$ seconds)', size=12)
ax[0].set_ylabel('Voltage $V_{k}, k \in \{1,2\}$', size=12)
ax[0].set_xlim(2.5, 10)
ax[0].set_ylim(-0.1, 1.6)
ax[1].set_xlim(2.5, 10)
ax[1].set_ylim(0.85, 0.95)
ax[2].set_xlim(2.5, 10)
ax[2].set_ylim(0.85, 0.95)
fig.suptitle('Dominantly inhibitory effects', size=16)
ax[0].set_title('Presynaptic spike', size=14)
ax[1].set_title('Post-synaptic effect, $\\beta=0.1$', size=14)
ax[2].set_title('Post-synaptic effect, $\\beta=0.2$', size=14)
ax[0].legend(loc='upper right', fontsize=12)
ax[1].legend(loc='upper right', fontsize=12)
ax[2].legend(loc='upper right', fontsize=12)
plt.tight_layout()
plt.savefig('spikelet_shape.svg')
plt.show()
|
the-stack_0_18585 | # The Admin4 Project
# (c) 2013-2022 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
import wx.aui
import adm
import xmlres
import wx.grid
from wh import xlt, Menu, AcceleratorHelper, FileManager, Grid, localTimeMillis
from ._pgsql import pgConnection, quoteIdent
from ._explain import ExplainCanvas
from ._snippet import SnippetTree
from ._sqlgrid import SqlFrame, StringTable, HMARGIN, VMARGIN
from ._sqledit import SqlEditor
NULLSTRING="(NULL)"
class SqlResultGrid(Grid):
def __init__(self, parent):
Grid.__init__(self, parent)
self.SetTable(StringTable(0,0))
self.SetColLabelSize(0)
self.SetRowLabelSize(0)
pt=parent.GetFont().GetPointSize()
if wx.Platform != "__WXMSW__":
pt *= 0.95 # a little smaller
font=wx.Font(pt, wx.FONTFAMILY_TELETYPE, wx.NORMAL, wx.NORMAL)
self.SetDefaultCellFont(font)
self.Bind(wx.grid.EVT_GRID_COL_SIZE, self.OnChangeColSize)
self.AutoSize()
def OnChangeColSize(self, _evt):
adm.config.storeGridPositions(self)
def SetEmpty(self):
self.table=self.SetTable(StringTable(0,0))
self.SetColLabelSize(0)
self.SetRowLabelSize(0)
self.SendSizeEventToParent()
def SetData(self, rowset):
rowcount=rowset.GetRowcount()
colcount=len(rowset.colNames)
if rowcount<0:
rowcount=0
self.SetTable(StringTable(rowcount, colcount))
w,h=self.GetTextExtent('Colname')
self.SetColLabelSize(h+HMARGIN)
self.SetRowLabelSize(w+VMARGIN)
self.SetDefaultRowSize(h+HMARGIN)
self.previousCols=rowset.colNames
self.Freeze()
self.BeginBatch()
for x in range(colcount):
colname=rowset.colNames[x]
if colname == '?column?':
colname="Col #%d" % (x+1)
self.table.SetColLabelValue(x, colname)
y=0
for row in rowset:
for x in range(colcount):
val=row[x]
if val == None:
val=NULLSTRING
else:
val=str(val)
self.SetCellValue(y, x, val)
self.SetReadOnly(y,x)
y = y+1
self.EndBatch()
self.AutoSizeColumns(False)
adm.config.restoreGridPositions(self)
adm.config.storeGridPositions(self)
self.Thaw()
self.SendSizeEventToParent()
def Paste(self):
pass
def Cut(self):
self.Copy()
def Copy(self):
cellSep=", "
rowSep="\n"
vals=self.GetAllSelectedCellValues()
if vals:
tl=self.GetSelectionBlockTopLeft()
if len(tl):
br=self.GetSelectionBlockBottomRight()
rowvals=[]
start=0
for bc in range(len(tl)):
rows=br[bc][0]-tl[bc][0]+1
cols=br[bc][1]-tl[bc][1]+1
for rc in range(start, rows*cols, cols):
rowvals.append(cellSep.join(vals[0][rc: rc+cols]))
start += rows*cols
txt=rowSep.join(rowvals)
adm.SetClipboard(txt)
else:
txt=rowSep.join(map(lambda row:cellSep.join(row), vals))
adm.SetClipboard(txt)
def GetQuotedColLabelValue(self, col):
quoteChar="'"
val=self.GetColLabelValue(col)
return self.quoteVal(val, quoteChar)
def GetQuotedCellValue(self, row, col):
quoteChar="'"
val=self.GetCellValue(row, col)
if val == NULLSTRING:
return "NULL"
return self.quoteVal(val, quoteChar)
class QueryFrame(SqlFrame):
def __init__(self, parentWin, node, params={}):
SqlFrame.__init__(self, parentWin, xlt("Query Tool"), "SqlQuery")
self.server=node.GetServer()
self.application="%s Query Tool" % adm.appTitle
snippet_table=self.server.info.get('snippet_table')
if self.server.adminspace and snippet_table:
self.snippet_table="%s.%s" % (quoteIdent(self.server.adminspace), quoteIdent(snippet_table))
else:
self.snippet_table=None
dbName=params.get('dbname')
if not dbName:
if hasattr(node, "GetDatabase"):
dbName=node.GetDatabase().name
elif node.parentNode and hasattr(node.parentNode, 'GetDatabase'):
dbName=node.parentNode.GetDatabase().name
else:
dbName=self.server.maintDb
self.worker=None
self.sqlChanged=False
self.previousCols=[]
self.fileManager=FileManager(self, adm.config)
toolbar=self.toolbar
toolbar.Add(self.OnFileOpen, xlt("Load from file"),"file_open")
toolbar.Add(self.OnFileSave, xlt("Save to file"), "file_save")
toolbar.Add(self.OnToggleSnippets, xlt("Show snippets browser"), "snippets")
toolbar.AddSeparator()
toolbar.Add(self.OnCopy, xlt("Copy"), "clip_copy")
toolbar.Add(self.OnCut, xlt("Cut"), "clip_cut")
toolbar.Add(self.OnPaste, xlt("Paste"), "clip_paste")
toolbar.Add(self.OnClear, xlt("Clear"), "edit_clear")
toolbar.AddSeparator()
toolbar.Add(self.OnUndo, xlt("Undo"), "edit_undo")
toolbar.Add(self.OnRedo, xlt("Redo"), "edit_redo")
# toolbar.Add((self.OnFind, xlt("Find"), "edit_find")
toolbar.AddSeparator()
cbClass=xmlres.getControlClass("whComboBox")
allDbs=self.server.GetConnectableDbs()
size=max(map(lambda db: toolbar.GetTextExtent(db)[0], allDbs))
BUTTONOFFS=30
self.databases=cbClass(toolbar, size=(size+BUTTONOFFS, -1))
self.databases.Append(allDbs)
self.databases.Append(xlt("Connect..."))
self.databases.SetStringSelection(dbName)
self.OnChangeDatabase()
self.databases.Bind(wx.EVT_COMBOBOX, self.OnChangeDatabase)
toolbar.Add(self.OnExecuteQuery, xlt("Execute Query"), "query_execute")
toolbar.Add(self.OnExplainQuery, xlt("Explain Query"), "query_explain")
toolbar.Add(self.OnCancelQuery, xlt("Cancel Query"), "query_cancel")
toolbar.AddControl(self.databases)
toolbar.AddSeparator()
toolbar.Add(self.OnAddSnippet, xlt("Add snippet"), "snippet_add")
toolbar.Add(self.OnReplaceSnippet, xlt("Replace snippet"), "snippet_replace")
toolbar.Realize()
menubar=wx.MenuBar()
self.filemenu=menu=Menu(self)
menu.Add(self.OnFileOpen, xlt("&Open"), xlt("Open query file"))
menu.Append(-1, xlt("Open recent..."), self.fileManager.GetRecentFilesMenu())
menu.Add(self.OnFileInsert, xlt("&Insert"), xlt("Insert query file"))
menu.Add(self.OnFileSave, xlt("&Save"), xlt("Save current file"))
menu.Add(self.OnFileSaveAs, xlt("Save &as.."), xlt("Save file under new name"))
menu.AppendSeparator()
# menu.Add(xlt("Preferences"), xlt("Preferences"), self.OnPreferences)
menu.Add(self.OnClose, xlt("Quit SQL"), xlt("Quit Sql"))
menubar.Append(menu, xlt("&File"))
self.viewmenu=menu=Menu(self)
menu.AddCheck(self.OnToggleSnippets, xlt("Snippets"), xlt("Show or hide snippet browser"))
self.registerToggles(True, True)
menubar.Append(self.viewmenu, xlt("&View"))
self.editmenu=menu=Menu(self)
menu.Add(self.OnUndo, xlt("&Undo"), xlt("Undo last action"))
menu.Add(self.OnRedo, xlt("&Redo"), xlt("Redo last action"))
# menu.Add(xlt("&Find"), xlt("Find string"), self.OnFind)
menu.AppendSeparator()
menu.Add(self.OnCut, xlt("Cu&t"), xlt("Cut selected text to clipboard"))
menu.Add(self.OnCopy, xlt("&Copy"), xlt("Copy selected text to clipboard"))
menu.Add(self.OnPaste, xlt("&Paste"), xlt("Paste text from clipboard"))
menu.Add(self.OnClear, xlt("C&lear"), xlt("Clear editor"))
menu.AppendSeparator()
menu.Add(self.OnAddSnippet, xlt("Add snippet"), xlt("Add selected text to snippets"))
menu.Add(self.OnReplaceSnippet, xlt("Modify snippet"), xlt("Replace snippet with selected text"))
menubar.Append(menu, xlt("&Edit"))
self.querymenu=menu=Menu(self)
menu.Add(self.OnExecuteQuery, xlt("Execute"), xlt("Execute query"))
menu.Add(self.OnExplainQuery, xlt("Explain"), xlt("Explain query"))
menu.Add(self.OnCancelQuery, xlt("Cancel"), xlt("Cancel query execution"))
menubar.Append(menu, xlt("&Query"))
self.helpmenu=menu=Menu(self)
menu.Add(self.OnHelp, xlt("Help"), xlt("Show help"), wx.ID_HELP)
menubar.Append(menu, xlt("&Help"))
self.EnableMenu(self.querymenu, self.OnCancelQuery, False)
self.SetMenuBar(menubar)
ah=AcceleratorHelper(self)
ah.Add(wx.ACCEL_CTRL, 'X', self.OnCut)
ah.Add(wx.ACCEL_CTRL, 'C', self.OnCopy)
ah.Add(wx.ACCEL_CTRL, 'V', self.OnPaste)
ah.Add(wx.ACCEL_NORMAL,wx.WXK_F5, self.OnExecuteQuery)
ah.Add(wx.ACCEL_NORMAL,wx.WXK_F7, self.OnExplainQuery)
ah.Add(wx.ACCEL_ALT,wx.WXK_PAUSE, self.OnCancelQuery)
self.editor=SqlEditor(self)
self.editor.SetAcceleratorTable(ah.GetTable())
self.editor.BindProcs(self.OnChangeStc, self.OnStatusPos)
self.manager.AddPane(self.editor, wx.aui.AuiPaneInfo().Top().PaneBorder().Resizable().MinSize((200,100)).BestSize((400,200)).CloseButton(False) \
.Name("sqlQuery").Caption(xlt("SQL Query")))
self.snippets=SnippetTree(self, self.server, self.editor)
self.manager.AddPane(self.snippets, wx.aui.AuiPaneInfo().Left().Top().PaneBorder().Resizable().MinSize((100,100)).BestSize((100,100)).CloseButton(True) \
.Name("snippets").Caption(xlt("SQL Snippets")))
if not self.snippet_table:
self.manager.GetPane("snippets").Show(False)
self.output=wx.Notebook(self)
self.result=SqlResultGrid(self.output)
self.explain = ExplainCanvas(self.output)
self.explain.Hide()
font=self.editor.GetFont()
self.messages=wx.TextCtrl(self.output, style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_DONTWRAP)
self.msgHistory=wx.TextCtrl(self.output, style=wx.TE_MULTILINE|wx.TE_READONLY|wx.TE_DONTWRAP)
self.messages.SetFont(font)
self.msgHistory.SetFont(font)
self.output.AddPage(self.result, xlt("Output"))
self.output.AddPage(self.messages, xlt("Messages"))
self.output.AddPage(self.msgHistory, xlt("History"))
self.manager.AddPane(self.output, wx.aui.AuiPaneInfo().Center().MinSize((200,100)).BestSize((400,200)).CloseButton(False) \
.Name("Result").Caption(xlt("Result")).CaptionVisible(False))
self.manager.Bind(wx.aui.EVT_AUI_PANE_CLOSE, self.OnAuiCloseEvent)
self.SetStatus(xlt("ready"))
self.restorePerspective()
self.manager.GetPane("Result").Show()
self.manager.Update()
self.viewmenu.Check(self.OnToggleSnippets, self.manager.GetPane("snippets").IsShown())
self.OnToggleToolBar()
self.OnToggleStatusBar()
self.updateMenu()
query=params.get('query')
if query:
self.editor.SetText(query)
pos=params.get('errline', -1)
if pos:
line=self.editor.LineFromPosition(int(pos))
self.editor.MarkerSet(line)
msg=params.get('message')
if msg:
self.messages.AppendText(msg)
hint=params.get('hint')
if hint:
self.messages.AppendText("\n\nHINT:\n")
self.messages.AppendText(hint)
self.output.SetSelection(1)
# in practice, not helpful.
# elif hasattr(node, 'GetSql'):
# self.editor.SetText(node.GetSql())
self.Show()
self.editor.SetFocus()
def SetTitle(self, dbName):
title=xlt("PostgreSQL Query Tool - Database \"%(dbname)s\" on Server \"%(server)s\"" % { 'dbname': dbName, 'server': self.server.name})
adm.Frame.SetTitle(self, title)
def OnHelp(self, _evt):
wx.LaunchDefaultBrowser("http://www.admin4.org/docs/pgsql/querytool")
def OnClose(self, evt):
self.OnCancelQuery(None)
for i in range(self.databases.GetCount()):
conn=self.databases.GetClientData(i)
if conn:
conn.disconnect()
super(QueryFrame, self).OnClose(evt)
self.Destroy()
def OnChangeDatabase(self, _evt=None):
i=self.databases.GetSelection()
if i == self.databases.GetCount()-1:
class ConnectDlg(adm.CheckedDialog):
def __init__(self, frame):
adm.CheckedDialog.__init__(self, frame)
self.frame=frame
def Go(self):
self['Database'].AppendItems(self.frame.server.GetConnectableDbs())
self['Database'].SetStringSelection(self.frame.server.maintDb)
def Execute(self):
user=dlg.User
if user: dbName="%s@%s" % (user, self.Database)
else: dbName=self.Database
if self.frame.databases.FindString(dbName) < 0:
try:
conn = pgConnection(self.frame.server.GetDsn(self.Database, self.frame.application, user, self.password))
self.frame.lastDatabaseSelection=self.frame.databases.GetCount()-1
self.frame.databases.Insert(dbName, self.frame.lastDatabaseSelection, conn)
except Exception as e:
self.SetStatus(str(e))
return False
return True
dlg=ConnectDlg(self)
dlg.GoModal()
self.databases.SetSelection(self.lastDatabaseSelection)
return
elif i >= 0:
dbName=self.databases.GetString(i)
self.conn = self.databases.GetClientData(i)
if not self.conn:
try:
self.conn = pgConnection(self.server.GetDsn(dbName, self.application))
self.databases.SetClientData(i, self.conn)
except Exception as e:
print (str(e))
self.SetTitle(dbName)
self.lastDatabaseSelection=i
def updateMenu(self, ctl=None):
if not self.GetToolBar():
return
canCut=canPaste=canUndo=canRedo=False
if not ctl or ctl == self.editor:
canUndo=self.editor.CanUndo()
canRedo=self.editor.CanRedo()
canPaste=True # self.editor.CanPaste() crashes under wxGTK
canCut = True
a,e=self.editor.GetSelection()
canQuery = not self.worker and ( a!=e or self.editor.GetLineCount() >1 or self.getSql() )
self.EnableMenu(self.editmenu, self.OnAddSnippet, self.snippet_table)
self.EnableMenu(self.editmenu, self.OnReplaceSnippet, self.snippets.CanReplace())
self.EnableMenu(self.editmenu, self.OnCut, canCut)
self.EnableMenu(self.editmenu, self.OnPaste, canPaste)
self.EnableMenu(self.editmenu, self.OnUndo, canUndo)
self.EnableMenu(self.editmenu, self.OnRedo, canRedo)
self.EnableMenu(self.editmenu, self.OnClear, canQuery)
# self.EnableMenu(self.editmenu, self.OnFind, canQuery)
self.EnableMenu(self.filemenu, self.OnFileSave, self.sqlChanged)
self.EnableMenu(self.querymenu, self.OnExecuteQuery, canQuery)
self.EnableMenu(self.querymenu, self.OnExplainQuery, canQuery)
def executeSql(self, targetPage, sql, _queryOffset=0, resultToMsg=False):
self.EnableMenu(self.querymenu, self.OnCancelQuery, True)
self.EnableMenu(self.querymenu, self.OnExecuteQuery, False)
self.EnableMenu(self.querymenu, self.OnExplainQuery, False)
wx.YieldIfNeeded()
self.startTime=localTimeMillis();
self.worker=worker=self.conn.GetCursor().ExecuteAsync(sql)
rowcount=0
rowset=None
worker.start()
self.SetStatus(xlt("Query is running."));
self.SetStatusText("", self.STATUSPOS_SECS);
self.SetStatusText("", self.STATUSPOS_ROWS);
self.msgHistory.AppendText(xlt("-- Executing query:\n"));
self.msgHistory.AppendText(sql);
self.msgHistory.AppendText("\n");
self.editor.MarkerDelete()
self.messages.Clear()
durationTxt=self.pollWorker()
self.worker=None
self.EnableMenu(self.querymenu, self.OnCancelQuery, False)
self.EnableMenu(self.querymenu, self.OnExecuteQuery, True)
self.EnableMenu(self.querymenu, self.OnExplainQuery, True)
if worker.error:
errmsg=worker.error.error
errlines=errmsg.splitlines()
self.messages.SetValue(errmsg)
self.msgHistory.AppendText(errmsg)
for i in range(1, len(errlines)):
if errlines[i].startswith("LINE "):
lineinfo=errlines[i].split(':')[0][5:]
colinfo=errlines[i+1].find('^')
_dummy=colinfo
self.editor.MarkerSet(int(lineinfo)-1 + self.editor.GetSelectOffset())
break
if worker.cancelled:
self.SetStatus(xlt("Cancelled."));
elif worker.error:
self.SetStatus(errlines[0]);
else:
self.SetStatus(xlt("OK."));
rowcount=worker.GetRowcount()
rowset=worker.GetResult()
if worker.error:
self.SetStatusText("", self.STATUSPOS_ROWS)
else:
if rowcount == 1:
rowsMsg=xlt("1 row affected")
elif rowcount < 0:
rowsMsg=xlt("Executed")
else:
rowsMsg= xlt("%d rows affected") % rowcount
self.SetStatusText(rowsMsg, self.STATUSPOS_ROWS)
self.msgHistory.AppendText("-- %s\n" % rowsMsg)
rowsMsg += xlt("; %s execution time.") % durationTxt
self.msgHistory.AppendText("\n")
currentPage=self.output.GetPage(0)
if currentPage != targetPage:
self.output.RemovePage(0)
currentPage.Hide()
targetPage.Show()
self.output.InsertPage(0, targetPage, xlt("Data output"), True)
if rowset and rowset.colNames:
self.output.SetSelection(0)
targetPage.SetData(rowset)
else:
self.output.SetSelection(1)
targetPage.SetEmpty()
for notice in self.conn.conn.notices:
self.messages.AppendText(notice);
self.messages.AppendText("\n")
if not worker.error:
if resultToMsg:
self.messages.SetValue("\n".join(targetPage.GetResult()))
else:
self.messages.SetValue(rowsMsg)
self.editor.SetFocus()
def getSql(self):
sql=self.editor.GetSelectedText()
if not sql:
sql=self.editor.GetText()
return sql.strip()
def OnAuiCloseEvent(self, evt):
if evt.GetPane().name == "snippets":
self.filemenu.Check(self.OnToggleSnippets, False)
def OnToggleSnippets(self, evt):
paneInfo=self.manager.GetPane("snippets")
how=self.viewmenu.IsChecked(self.OnToggleSnippets)
if isinstance(evt.EventObject, wx.ToolBar):
how=not how
self.viewmenu.Check(self.OnToggleSnippets, how)
paneInfo.Show(how)
self.manager.Update()
def OnAddSnippet(self, _evt):
sql=self.getSql()
if sql:
dlg=wx.TextEntryDialog(self, xlt("Snippet name"), xlt("Add snippet"))
if dlg.ShowModal() == wx.ID_OK:
name=dlg.GetValue()
self.snippets.AppendSnippet(name, sql)
self.SetStatus(xlt("Snipped stored."))
def OnReplaceSnippet(self, _evt):
sql=self.getSql()
if sql:
self.snippets.ReplaceSnippet(sql)
def OnCancelQuery(self, _evt):
self.EnableMenu(self.querymenu, self.OnCancelQuery, False)
if self.worker:
self.worker.Cancel()
def OnExecuteQuery(self, _evt):
sql=self.getSql()
if not sql.strip():
return
self.executeSql(self.result, sql)
def OnExplainQuery(self,_evt):
sql=self.getSql()
if not sql:
return
self.executeSql(self.explain, "EXPLAIN %s" % sql, 8, True)
def readFile(self, message, filename=None):
if not filename:
filename=self.fileManager.OpenFile(self, self.filePatterns, message)
if filename:
try:
f=open(filename, 'r')
sql=f.read()
f.close()
return sql
except:
self.SetStatus(xlt("Failed to read %s") % filename)
return None
def fileOpen(self, header, filename=None):
sql=self.readFile(header, filename)
if sql:
self.editor.ClearAll()
self.editor.ReplaceSelection(sql)
self.SetStatus(xlt("%d characters read from %s") % (len(sql), self.fileManager.currentFile))
self.updateMenu()
def OnRecentFileOpened(self, filename):
self.fileOpen(None, filename)
def OnFileOpen(self, _evt):
self.fileOpen(xlt("Open SQL file"))
def OnFileInsert(self, _evt):
sql=self.readFile(xlt("Insert SQL from file"))
if sql:
self.editor.ReplaceSelection(sql)
self.SetStatus(xlt("%d characters inserted from %s") % (len(sql), self.fileManager.currentFile))
self.updateMenu()
def saveFile(self, proc):
try:
ok=proc(self, self.editor.GetText(), self.filePatterns, xlt("Save SQL Query"))
if ok:
self.SetStatus(xlt("Saved SQL query to %s") % self.fileManager.filename)
self.sqlChanged=False
self.updateMenu()
else:
        self.SetStatus(xlt("Nothing saved"))
except:
self.SetStatus(xlt("Failed to save to %s") % self.fileManager.filename)
def OnFileSave(self, _evt):
self.saveFile(self.fileManager.SaveFile)
def OnFileSaveAs(self, _evt):
self.saveFile(self.fileManager.SaveFileAs)
def OnUndo(self, _evt):
self.editor.Undo()
def OnClear(self, _evt):
self.editor.ClearAll()
self.updateMenu()
def OnFind(self, _evt):
pass
def OnRedo(self, _evt):
self.editor.Redo()
def OnChangeStc(self, _evt):
self.sqlChanged=True
self.updateMenu()
def OnStatusPos(self, _evt):
row=self.editor.LineFromPosition(self.editor.GetCurrentPos())+1
col=self.editor.GetColumn(self.editor.GetCurrentPos())+1
self.SetStatusText(xlt("Ln %d Col %d") % (row, col), self.STATUSPOS_POS)
############################################################
# node menu
class QueryTool:
name=xlt("Query Tool")
help=xlt("Execute SQL Queries")
toolbitmap='SqlQuery'
@staticmethod
def GetInstrumentQuery(server):
sql="""SELECT 'snippet_table', relname FROM pg_class JOIN pg_namespace nsp ON nsp.oid=relnamespace
WHERE nspname='%(adminspace)s' AND relname='%(snippet_table)s'""" % {
'adminspace': server.GetPreference("AdminNamespace"),
'snippet_table': "Admin_Snippet_%s" % server.user
}
return sql
@staticmethod
def GetMissingInstrumentation(server):
if not server.info.get('snippet_table'):
return 'snippet_table'
@staticmethod
def DoInstrument(server):
if not server.info.get('snippet_table'):
snippet_table=quoteIdent("Admin_Snippet_%s" % server.user)
server.GetCursor().ExecuteSingle("""
CREATE TABLE %(adminspace)s.%(snippet_table)s
(id SERIAL PRIMARY KEY, parent INT4 NOT NULL DEFAULT 0, sort FLOAT NOT NULL DEFAULT 0.0, name TEXT, snippet TEXT);""" %
{'adminspace': quoteIdent(server.adminspace),
'snippet_table': snippet_table })
@staticmethod
def CheckAvailableOn(_node):
return True
@staticmethod
def OnExecute(parentWin, node):
_frame=QueryFrame(parentWin, node)
nodeinfo=[]
menuinfo=[ {"class": QueryTool, "sort": 35 } ]
|
the-stack_0_18586 | import asyncio
import logging
import os
import unittest
from integration_tests.env_variable_names import SLACK_SDK_TEST_USER_TOKEN
from integration_tests.helpers import async_test
from slack import WebClient
class TestWebClient(unittest.TestCase):
"""Runs integration tests with real Slack API
https://github.com/slackapi/python-slack-sdk/issues/378
"""
def setUp(self):
self.logger = logging.getLogger(__name__)
self.user_token = os.environ[SLACK_SDK_TEST_USER_TOKEN]
self.sync_client: WebClient = WebClient(token=self.user_token, run_async=False, loop=asyncio.new_event_loop())
self.async_client: WebClient = WebClient(token=self.user_token, run_async=True)
def tearDown(self):
pass
def test_issue_378(self):
client = self.sync_client
response = client.users_setPhoto(image="tests/data/slack_logo_new.png")
self.assertIsNotNone(response)
@async_test
async def test_issue_378_async(self):
client = self.async_client
response = await client.users_setPhoto(image="tests/data/slack_logo_new.png")
self.assertIsNotNone(response)
|
the-stack_0_18587 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'tokumx', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
def parse_pyproject_array(name):
import os
import re
from ast import literal_eval
pattern = r'^{} = (\[.*?\])$'.format(name)
with open(os.path.join(HERE, 'pyproject.toml'), 'r', encoding='utf-8') as f:
# Windows \r\n prevents match
contents = '\n'.join(line.rstrip() for line in f.readlines())
array = re.search(pattern, contents, flags=re.MULTILINE | re.DOTALL).group(1)
return literal_eval(array)
CHECKS_BASE_REQ = parse_pyproject_array('dependencies')[0]
setup(
name='datadog-tokumx',
version=ABOUT['__version__'],
description='The TokuMX check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent tokumx check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.tokumx'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': parse_pyproject_array('deps')},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
the-stack_0_18588 | # ----------------------------------------------------------------------------
# Name: qfiledialog.py
# Author: Gabriel F
# GitHub: https://github.com/gab98fra/
# Created: 20 September 2020
# Modified: 24 September 2020
# Copyright: (c) 2020 by Gabriel F, 2020
# ----------------------------------------------------------------------------
"""
Example of QFileDialog: opening and saving a txt file
Python 3.8.2
PyQt 5.15.0
"""
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QApplication, QDialog, QPushButton, QTextEdit, QLabel, QFileDialog)
import sys
class aplicacion(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle("QFileDialog with PyQt5")
self.setWindowIcon(QIcon("logo.png"))
self.resize(500,600)
self.setWindowFlags(Qt.WindowCloseButtonHint | Qt.MSWindowsFixedSizeDialogHint)
self.initUI()
def initUI(self):
#Widgets
label=QLabel("Abrir y guardar archivos txt", self)
label.move(120, 20)
pusbutton=QPushButton("Abrir un archivo", self)
pusbutton.move(100,80)
pusbutton1=QPushButton(self)
pusbutton1.setText("Guardar un archivo")
pusbutton1.move(300,80)
self.textedit=QTextEdit(self)
self.textedit.resize(460, 450)
self.textedit.move(20, 120)
        # ------- PushButton handlers ----------------
pusbutton.clicked.connect(self.openFile)
pusbutton1.clicked.connect(self.saveFile)
def openFile(self):
        # Read a txt file
filename=QFileDialog.getOpenFileName(self, "Abrir archivo", "c:\\", "Archivos txt (*.txt)")
if filename[0]:
            # A path was selected, e.g. c://users/documents/...
            # Open the file
f=open(filename[0], "r")
with f:
                # Read the file contents
data=f.read()
                # Display the file contents
self.textedit.setText(data)
def saveFile(self):
        # Save a txt file
        options = QFileDialog.Options()
        # Use a dialog different from the native one
        options |= QFileDialog.DontUseNativeDialog
filesave, _ =QFileDialog.getSaveFileName(self, "Guardar archivo", "c:\\", "Archivos txt (*.txt)", options=options)
        # filesave ----> selected file path
f=open(filesave, 'w')
with f:
            # Write the textedit contents to the file
            f.write(self.textedit.toPlainText())  # toPlainText() gives the plain-text contents
if __name__ == '__main__':
app = QApplication(sys.argv)
ventana = aplicacion()
ventana.show()
sys.exit(app.exec_())
|
the-stack_0_18591 | import asyncio
import logging
from typing import Type
from .dt import DT
from .es import ES
from .et import ET
from .exceptions import InverterError
from .inverter import Inverter, Sensor, SensorKind
from .protocol import UdpInverterProtocol, Aa55ProtocolCommand
logger = logging.getLogger(__name__)
# Inverter family names
ET_FAMILY = ["ET", "EH", "BT", "BH"]
ES_FAMILY = ["ES", "EM", "BP"]
DT_FAMILY = ["DT", "NS", "XS"]
# Serial number tags to identify inverter type
ET_MODEL_TAGS = ["ETU", "EHU", "BTU", "BHU"]
ES_MODEL_TAGS = ["ESU", "EMU", "BPU", "BPS"]
# supported inverter protocols
_SUPPORTED_PROTOCOLS = [ET, DT, ES]
async def connect(host: str, port: int = 8899, family: str = None, comm_addr: int = None, timeout: int = 1,
retries: int = 3) -> Inverter:
"""Contact the inverter at the specified host/port and answer appropriate Inverter instance.
The specific inverter family/type will be detected automatically, but it can be passed explicitly.
Supported inverter family names are ET, EH, BT, BH, ES, EM, DT, NS, XS, BP.
    Inverter communication address may be passed explicitly; if not, the usual default value
    will be used (0xf7 for ET/EH/BT/BH/ES/EM/BP inverters, 0x7f for DT/D-NS/XS inverters).
Since the UDP communication is by definition unreliable, when no (valid) response is received by the specified
timeout, it is considered lost and the command will be re-tried up to retries times.
Raise InverterError if unable to contact or recognise supported inverter.
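    Example (an illustrative sketch; the host address and inverter family below are assumptions):
        import asyncio
        inverter = asyncio.run(connect(host="192.168.1.14", family="ET"))
        print(inverter.model_name, inverter.serial_number)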
"""
if family in ET_FAMILY:
inverter = ET(host, port, comm_addr, timeout, retries)
elif family in ES_FAMILY:
inverter = ES(host, port, comm_addr, timeout, retries)
elif family in DT_FAMILY:
inverter = DT(host, port, comm_addr, timeout, retries)
else:
return await discover(host, port, timeout, retries)
logger.debug(f"Connecting to {family} family inverter at {host}:{port}")
await inverter.read_device_info()
logger.debug(f"Connected to inverter {inverter.model_name}, S/N:{inverter.serial_number}")
return inverter
async def search_inverters() -> bytes:
"""Scan the network for inverters.
    Answer the inverter discovery response string (which includes its IP address)
Raise InverterError if unable to contact any inverter
"""
logger.debug("Searching inverters by broadcast to port 48899")
loop = asyncio.get_running_loop()
on_response_received = loop.create_future()
transport, _ = await loop.create_datagram_endpoint(
lambda: UdpInverterProtocol(
"WIFIKIT-214028-READ".encode("utf-8"),
lambda r: True,
on_response_received,
1, 3
),
remote_addr=("255.255.255.255", 48899),
allow_broadcast=True,
)
try:
await on_response_received
result = on_response_received.result()
if result is not None:
return result
else:
raise InverterError("No response received to broadcast request")
except asyncio.CancelledError:
raise InverterError("No valid response received to broadcast request") from None
finally:
transport.close()
async def discover(host: str, port: int = 8899, timeout: int = 1, retries: int = 3) -> Inverter:
"""Contact the inverter at the specified value and answer appropriate Inverter instance
Raise InverterError if unable to contact or recognise supported inverter
"""
failures = []
# Try the common AA55C07F0102000241 command first and detect inverter type from serial_number
try:
logger.debug(f"Probing inverter at {host}:{port}")
response = await Aa55ProtocolCommand("010200", "0182").execute(host, port, timeout, retries)
model_name = response[12:22].decode("ascii").rstrip()
serial_number = response[38:54].decode("ascii")
inverter_class: Type[Inverter] = ET
for model_tag in ET_MODEL_TAGS:
if model_tag in serial_number:
logger.debug(f"Detected ET/EH/BT/BH inverter {model_name}, S/N:{serial_number}")
inverter_class = ET
for model_tag in ES_MODEL_TAGS:
if model_tag in serial_number:
logger.debug(f"Detected ES/EM/BP inverter {model_name}, S/N:{serial_number}")
inverter_class = ES
if inverter_class:
i = inverter_class(host, port, 0, timeout, retries)
await i.read_device_info()
return i
except InverterError as ex:
failures.append(ex)
# Probe inverter specific protocols
for inverter in _SUPPORTED_PROTOCOLS:
i = inverter(host, port, 0, timeout, retries)
try:
logger.debug(f"Probing {inverter.__name__} inverter at {host}:{port}")
await i.read_device_info()
logger.debug(f"Detected {inverter.__name__} protocol inverter {i.model_name}, S/N:{i.serial_number}")
return i
except InverterError as ex:
failures.append(ex)
raise InverterError(
"Unable to connect to the inverter at "
f"host={host} port={port}, or your inverter is not supported yet.\n"
f"Failures={str(failures)}"
)
|
the-stack_0_18592 | import argparse
import collections
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms
from retinanet import model
from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \
Normalizer
from torch.utils.data import DataLoader
from retinanet import coco_eval
from retinanet import csv2coco_eval
assert torch.__version__.split('.')[0] == '1'
print('CUDA available: {}'.format(torch.cuda.is_available()))
def main(args=None):
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
parser.add_argument('--coco_path', help='Path to COCO directory')
parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
parser.add_argument('--pretrained', help='use pretrained head for resnet50',action='store_true', default=False)
parser.add_argument('--batch_size', help='Size of mini batch', type=int, default=4)
parser = parser.parse_args(args)
# Create the data loaders
if parser.dataset == 'coco':
if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO.')
dataset_train = CocoDataset(parser.coco_path, set_name='train2017',
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
transform=transforms.Compose([Normalizer(), Resizer()]))
elif parser.dataset == 'csv':
if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on a CSV dataset.')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on a CSV dataset.')
dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
if parser.csv_val is None:
dataset_val = None
print('No validation annotations provided.')
else:
dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
transform=transforms.Compose([Normalizer(), Resizer()]))
else:
raise ValueError('Dataset type not understood (must be csv or coco), exiting.')
sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False)
dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler)
if dataset_val is not None:
sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=parser.batch_size, drop_last=False)
dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)
# Create the model
if parser.depth == 18:
retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 34:
retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 50:
retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 101:
retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
elif parser.depth == 152:
retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
else:
raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
use_gpu = True
if (parser.pretrained==True):
retinanet.load_state_dict(torch.load('weights.pt'))
if use_gpu:
if torch.cuda.is_available():
retinanet = retinanet.cuda()
# if torch.cuda.is_available():
# retinanet = torch.nn.DataParallel(retinanet).cuda()
# else:
# retinanet = torch.nn.DataParallel(retinanet)
retinanet.training = True
optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
loss_hist = collections.deque(maxlen=500)
retinanet.train()
retinanet.freeze_bn()
print('Num training images: {}'.format(len(dataset_train)))
for epoch_num in range(parser.epochs):
retinanet.train()
retinanet.freeze_bn()
epoch_loss = []
for iter_num, data in enumerate(dataloader_train):
try:
optimizer.zero_grad()
if torch.cuda.is_available():
classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot'].cuda()])
else:
classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']])
classification_loss = classification_loss.mean()
regression_loss = regression_loss.mean()
loss = classification_loss + regression_loss
if bool(loss == 0):
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
optimizer.step()
loss_hist.append(float(loss))
epoch_loss.append(float(loss))
print(
'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)))
del classification_loss
del regression_loss
except Exception as e:
print(e)
continue
if parser.dataset == 'coco':
print('Evaluating dataset')
coco_eval.evaluate_coco(dataset_val, retinanet)
elif parser.dataset == 'csv' and parser.csv_val is not None:
print('Evaluating dataset')
mAP = csv2coco_eval.evaluate_coco(dataset_val, retinanet)
scheduler.step(np.mean(epoch_loss))
torch.save(retinanet, './pth/{}_retinanet_{}.pt'.format(parser.depth, epoch_num))
retinanet.eval()
torch.save(retinanet, './pth/model_final_{}.pt'.format(parser.depth))
if __name__ == '__main__':
main()
|
the-stack_0_18595 | from core.models import UnitGroup
from django.contrib.auth.decorators import login_required
from django.http.request import HttpRequest
from django.http.response import HttpResponse, HttpResponseRedirect
from django.urls.base import reverse
from django.utils.text import slugify
from wiki.models import URLPath
def edit_redirect(u: URLPath) -> HttpResponseRedirect:
return HttpResponseRedirect(reverse('wiki:edit', kwargs={'path': u.path}))
def view_redirect(u: URLPath) -> HttpResponseRedirect:
return HttpResponseRedirect(u.get_absolute_url())
def wiki_redirect(u: URLPath) -> HttpResponseRedirect:
if 'automatically generated article' in u.article.current_revision.content:
return edit_redirect(u)
else:
return view_redirect(u)
@login_required
def problem(request: HttpRequest, puid: str) -> HttpResponse:
try:
u = URLPath.get_by_path(path=f'notable-problems/{puid}')
except URLPath.DoesNotExist:
parent = URLPath.get_by_path(path='notable-problems')
content = f'[problem {puid}]' + '\n' + '[/problem]' + '\n' * 2
content += f'(This is an automatically generated article for {puid}. Please add some content!)' + '\n' * 2
content += '## Statement' + '\n'
content += '[statement]'
u = URLPath.create_urlpath(
parent=parent,
slug=puid,
title=puid,
request=request,
content=content,
)
return wiki_redirect(u)
@login_required
def unitgroup(request: HttpRequest, pk: int) -> HttpResponse:
group = UnitGroup.objects.get(pk=pk)
if group.subject == "A" or group.subject == "F":
subject_name = 'algebra'
elif group.subject == "C":
subject_name = 'combinatorics'
elif group.subject == "G":
subject_name = 'geometry'
elif group.subject == "N":
subject_name = 'number-theory'
elif group.subject == "M":
subject_name = 'miscellaneous'
elif group.subject == "K":
subject_name = 'null'
else:
raise Exception(f"No subject for {group.name}.")
slug = slugify(group.name)
try:
u = URLPath.get_by_path(path=f'/units/list-of-{subject_name}-units/{slug}')
except URLPath.DoesNotExist:
parent = URLPath.get_by_path(path=f'/units/list-of-{subject_name}-units/')
content = f'[unit {group.name}]' + '\n' + '[/unit]' + '\n' * 2
content += f'(This is an automatically generated article for {group.name}. Please add some content!)' + '\n' * 2
u = URLPath.create_urlpath(
parent=parent,
slug=slug,
title=group.name,
request=request,
content=content,
)
return wiki_redirect(u)
|
the-stack_0_18598 | from __future__ import print_function
import os
from .bgwtask import BGWTask
from .kgrid import KgridTask, get_kpt_grid
from .inputs import SigmaInput
# Public
__all__ = ['SigmaTask']
class SigmaTask(BGWTask):
"""Self-energy calculation."""
_TASK_NAME = 'Sigma'
_input_fname = 'sigma.inp'
_output_fname = 'sigma.out'
def __init__(self, dirname, **kwargs):
"""
Arguments
---------
dirname : str
Directory in which the files are written and the code is executed.
Will be created if needed.
Keyword arguments
-----------------
(All mandatory unless specified otherwise)
structure : pymatgen.Structure
Structure object containing information on the unit cell.
ngkpt : list(3), float
K-points grid. Number of k-points along each primitive vector
of the reciprocal lattice.
kpts : 2D list(nkpt,3), float, optional
List of k-points.
K-points are either specified using ngkpt or using kpts.
ibnd_min : int
Minimum band index for GW corrections.
ibnd_max : int
Maximum band index for GW corrections.
ngqpt : list(3), float, optional
Q-points grid, for HF or hybrid functionals.
qpts : 2D list(nqpt,3), float, optional
List of q-points, for HF or hybrid functionals.
wfn_co_fname : str
Path to the wavefunction file produced by pw2bgw.
rho_fname : str
Path to the density file produced by pw2bgw.
vxc_dat_fname : str
Path to the vxc file produced by pw2bgw.
eps0mat_fname : str
Path to the eps0mat file produced by epsilon.
epsmat_fname : str
Path to the epsmat file produced by epsilon.
extra_lines : list, optional
Any other lines that should appear in the input file.
extra_variables : dict, optional
Any other variables that should be declared in the input file.
Properties
----------
sigma_fname : str
Path to the sigma_hp.log file produced.
eqp0_fname : str
Path to the eqp0.dat file produced.
eqp1_fname : str
Path to the eqp1.dat file produced.
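        Example
        -------
        A minimal construction sketch (all paths and numeric values below are
        illustrative assumptions, not defaults of this class):
            task = SigmaTask('Sigma',
                             structure=structure,
                             ngkpt=[2, 2, 2],
                             ibnd_min=1,
                             ibnd_max=12,
                             wfn_co_fname='Wavefunctions/wfn_co.cplx',
                             rho_fname='Density/rho.real',
                             vxc_dat_fname='Density/vxc.dat',
                             eps0mat_fname='Epsilon/eps0mat.h5',
                             epsmat_fname='Epsilon/epsmat.h5')
            task.write()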
"""
super(SigmaTask, self).__init__(dirname, **kwargs)
extra_lines = kwargs.get('extra_lines',[])
extra_variables = kwargs.get('extra_variables',{})
# Use specified kpoints or compute them from grid.
kpt_aliases = ('kpts', 'kpoints', 'sigma_kpts', 'sigma_k_points', 'sigma_kpoints')
for key in kpt_aliases:
if key in kwargs:
kpts = kwargs[key]
break
else:
# Compute k-points grids
#structure = kwargs['structure']
#ngkpt = kwargs['ngkpt']
#kpts, wtks = get_kpt_grid(structure, ngkpt)
kgrid_kwargs = dict()
for key in ('structure', 'ngkpt', 'fft', 'use_tr', 'clean_after'):
if key in kwargs:
kgrid_kwargs[key] = kwargs[key]
self.kgridtask = KgridTask(dirname=dirname, **kgrid_kwargs)
kpts, wtks = self.kgridtask.get_kpoints()
# Use specified qpoints or compute them from grid (HF).
if 'qpts' in kwargs:
qpts = kwargs['qpts']
elif 'ngqpt' in kwargs:
#structure = kwargs['structure']
#ngqpt = kwargs['ngqpt']
#qpts, wtqs = get_kpt_grid(structure, ngqpt)
kgrid_kwargs = dict(ngkpt=kwargs['ngqpt'])
for key in ('structure', 'fft', 'use_tr', 'clean_after'):
if key in kwargs:
kgrid_kwargs[key] = kwargs[key]
self.kgridtask = KgridTask(dirname=dirname, **kgrid_kwargs)
qpts, wtqs = self.kgridtask.get_kpoints()
else:
qpts = []
if 'ngqpt' in kwargs:
extra_variables['qpts'] = qpts
extra_variables['ngqpt'] = kwargs['ngqpt']
# Input file
self.input = SigmaInput(
kwargs['ibnd_min'],
kwargs['ibnd_max'],
kpts,
*extra_lines,
**extra_variables)
self.input.fname = self._input_fname
# Prepare links
self.wfn_co_fname = kwargs['wfn_co_fname']
self.rho_fname = kwargs['rho_fname']
if 'vxc_dat_fname' in kwargs:
self.vxc_dat_fname = kwargs['vxc_dat_fname']
elif 'vxc_fname' in kwargs:
self.vxc_fname = kwargs['vxc_fname']
else:
raise Exception(
"Either 'vxc_dat_fname' or 'vxc_fname' must be provided " +
"to SigmaTask.")
# It might be useful to issue a warning if those
# files are not specified, but one would have to check the value
# of frequency_dependence...
self.eps0mat_fname = kwargs.get('eps0mat_fname')
self.epsmat_fname = kwargs.get('epsmat_fname')
# Set up the run script
ex = 'sigma.cplx.x' if self._flavor_complex else 'sigma.real.x'
self.runscript['SIGMA'] = ex
self.runscript.append('$MPIRUN $SIGMA &> {}'.format(self._output_fname))
@property
def wfn_co_fname(self):
return self._wfn_co_fname
@wfn_co_fname.setter
def wfn_co_fname(self, value):
self._wfn_co_fname = value
self.update_link(value, 'WFN_inner')
@property
def rho_fname(self):
return self._rho_fname
@rho_fname.setter
def rho_fname(self, value):
self._rho_fname = value
self.update_link(value, 'RHO')
@property
def vxc_dat_fname(self):
return self._vxc_dat_fname
@vxc_dat_fname.setter
def vxc_dat_fname(self, value):
self._vxc_dat_fname = value
self.update_link(value, 'vxc.dat')
@property
def vxc_fname(self):
return self._vxc_fname
@vxc_fname.setter
def vxc_fname(self, value):
self._vxc_fname = value
self.update_link(value, 'VXC')
@property
def eps0mat_fname(self):
return self._eps0mat_fname
@eps0mat_fname.setter
def eps0mat_fname(self, value):
self._eps0mat_fname = value
dest = 'eps0mat.h5' if self._use_hdf5 else 'eps0mat'
self.update_link(value, dest)
@property
def epsmat_fname(self):
return self._epsmat_fname
@epsmat_fname.setter
def epsmat_fname(self, value):
self._epsmat_fname = value
dest = 'epsmat.h5' if self._use_hdf5 else 'epsmat'
self.update_link(value, dest)
def write(self):
super(SigmaTask, self).write()
with self.exec_from_dirname():
self.input.write()
@property
def sigma_fname(self):
"""Path to the sigma_hp.log file produced."""
return os.path.join(self.dirname, 'sigma_hp.log')
@property
def eqp0_fname(self):
"""Path to the eqp0.dat file produced."""
return os.path.join(self.dirname, 'eqp0.dat')
@property
def eqp1_fname(self):
"""Path to the eqp1.dat file produced."""
return os.path.join(self.dirname, 'eqp1.dat')
|
the-stack_0_18599 | """
Mask R-CNN
Configurations and data loading code for MS COCO.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 coco.py train --dataset=/path/to/coco/ --model=coco
# Train a new model starting from ImageNet weights. Also auto download COCO dataset
python3 coco.py train --dataset=/path/to/coco/ --model=imagenet --download=True
# Continue training a model that you had trained earlier
python3 coco.py train --dataset=/path/to/coco/ --model=/path/to/weights.h5
# Continue training the last model you trained
python3 coco.py train --dataset=/path/to/coco/ --model=last
    # Run COCO evaluation on the last model you trained
python3 coco.py evaluate --dataset=/path/to/coco/ --model=last
"""
import os
import sys
import time
import numpy as np
import imgaug # https://github.com/aleju/imgaug (pip3 install imgaug)
# Download and install the Python COCO tools from https://github.com/waleedka/coco
# That's a fork from the original https://github.com/pdollar/coco with a bug
# fix for Python 3.
# I submitted a pull request https://github.com/cocodataset/cocoapi/pull/50
# If the PR is merged then use the original repo.
# Note: Edit PythonAPI/Makefile and replace "python" with "python3".
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools import mask as maskUtils
import zipfile
import urllib.request
import shutil
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DEFAULT_DATASET_YEAR = "2014"
############################################################
# Configurations
############################################################
class CocoConfig(Config):
"""Configuration for training on MS COCO.
Derives from the base Config class and overrides values specific
to the COCO dataset.
"""
# Give the configuration a recognizable name
NAME = "coco"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 1
# Uncomment to train on 8 GPUs (default is 1)
# GPU_COUNT = 8
# Number of classes (including background)
    NUM_CLASSES = 1 + 20  # background + 20 classes (standard COCO itself has 80 classes)
############################################################
# Dataset
############################################################
class CocoDataset(utils.Dataset):
def load_coco(self, dataset_dir, subset, year=DEFAULT_DATASET_YEAR, class_ids=None,
class_map=None, return_coco=False, auto_download=False):
"""Load a subset of the COCO dataset.
dataset_dir: The root directory of the COCO dataset.
subset: What to load (train, val, minival, valminusminival)
year: What dataset year to load (2014, 2017) as a string, not an integer
class_ids: If provided, only loads images that have the given classes.
        class_map: TODO: Not implemented yet. Supports mapping classes from
different datasets to the same class ID.
return_coco: If True, returns the COCO object.
auto_download: Automatically download and unzip MS-COCO images and annotations
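        Example (paths are illustrative; see the command-line entry point below for real usage):
            dataset = CocoDataset()
            dataset.load_coco("/path/to/coco", "train", year="2017")
            dataset.prepare()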
"""
if auto_download is True:
self.auto_download(dataset_dir, subset, year)
coco = COCO("{}/annotations/instances_{}{}.json".format(dataset_dir, subset, year))
if subset == "minival" or subset == "valminusminival":
subset = "val"
image_dir = "{}/{}{}".format(dataset_dir, subset, year)
# Load all classes or a subset?
if not class_ids:
# All classes
class_ids = sorted(coco.getCatIds())
# All images
image_ids = list(coco.imgs.keys())
# Add classes
for i in class_ids:
self.add_class("coco", i, coco.loadCats(i)[0]["name"])
# Add images
for i in image_ids:
self.add_image(
"coco", image_id=i,
path=os.path.join(image_dir, coco.imgs[i]['file_name']),
width=coco.imgs[i]["width"],
height=coco.imgs[i]["height"],
annotations=coco.loadAnns(coco.getAnnIds(
imgIds=[i], catIds=class_ids, iscrowd=None)))
if return_coco:
return coco
def auto_download(self, dataDir, dataType, dataYear):
"""Download the COCO dataset/annotations if requested.
dataDir: The root directory of the COCO dataset.
dataType: What to load (train, val, minival, valminusminival)
dataYear: What dataset year to load (2014, 2017) as a string, not an integer
Note:
For 2014, use "train", "val", "minival", or "valminusminival"
For 2017, only "train" and "val" annotations are available
"""
# Setup paths and file names
if dataType == "minival" or dataType == "valminusminival":
imgDir = "{}/{}{}".format(dataDir, "val", dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, "val", dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format("val", dataYear)
else:
imgDir = "{}/{}{}".format(dataDir, dataType, dataYear)
imgZipFile = "{}/{}{}.zip".format(dataDir, dataType, dataYear)
imgURL = "http://images.cocodataset.org/zips/{}{}.zip".format(dataType, dataYear)
# print("Image paths:"); print(imgDir); print(imgZipFile); print(imgURL)
# Create main folder if it doesn't exist yet
if not os.path.exists(dataDir):
os.makedirs(dataDir)
# Download images if not available locally
if not os.path.exists(imgDir):
os.makedirs(imgDir)
print("Downloading images to " + imgZipFile + " ...")
with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + imgZipFile)
with zipfile.ZipFile(imgZipFile, "r") as zip_ref:
zip_ref.extractall(dataDir)
print("... done unzipping")
print("Will use images in " + imgDir)
# Setup annotations data paths
annDir = "{}/annotations".format(dataDir)
if dataType == "minival":
annZipFile = "{}/instances_minival2014.json.zip".format(dataDir)
annFile = "{}/instances_minival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0"
unZipDir = annDir
elif dataType == "valminusminival":
annZipFile = "{}/instances_valminusminival2014.json.zip".format(dataDir)
annFile = "{}/instances_valminusminival2014.json".format(annDir)
annURL = "https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0"
unZipDir = annDir
else:
annZipFile = "{}/annotations_trainval{}.zip".format(dataDir, dataYear)
annFile = "{}/instances_{}{}.json".format(annDir, dataType, dataYear)
annURL = "http://images.cocodataset.org/annotations/annotations_trainval{}.zip".format(dataYear)
unZipDir = dataDir
# print("Annotations paths:"); print(annDir); print(annFile); print(annZipFile); print(annURL)
# Download annotations if not available locally
if not os.path.exists(annDir):
os.makedirs(annDir)
if not os.path.exists(annFile):
if not os.path.exists(annZipFile):
print("Downloading zipped annotations to " + annZipFile + " ...")
with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:
shutil.copyfileobj(resp, out)
print("... done downloading.")
print("Unzipping " + annZipFile)
with zipfile.ZipFile(annZipFile, "r") as zip_ref:
zip_ref.extractall(unZipDir)
print("... done unzipping")
print("Will use annotations in " + annFile)
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. This
        function converts the different mask formats to one format
in the form of a bitmap [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a COCO image, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != "coco":
return super(CocoDataset, self).load_mask(image_id)
instance_masks = []
class_ids = []
annotations = self.image_info[image_id]["annotations"]
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
for annotation in annotations:
class_id = self.map_source_class_id(
"coco.{}".format(annotation['category_id']))
if class_id:
m = self.annToMask(annotation, image_info["height"],
image_info["width"])
# Some objects are so small that they're less than 1 pixel area
# and end up rounded out. Skip those objects.
if m.max() < 1:
continue
# Is it a crowd? If so, use a negative class ID.
if annotation['iscrowd']:
# Use negative class ID for crowds
class_id *= -1
# For crowd masks, annToMask() sometimes returns a mask
# smaller than the given dimensions. If so, resize it.
if m.shape[0] != image_info["height"] or m.shape[1] != image_info["width"]:
m = np.ones([image_info["height"], image_info["width"]], dtype=bool)
instance_masks.append(m)
class_ids.append(class_id)
# Pack instance masks into an array
if class_ids:
mask = np.stack(instance_masks, axis=2).astype(np.bool)
class_ids = np.array(class_ids, dtype=np.int32)
return mask, class_ids
else:
# Call super class to return an empty mask
return super(CocoDataset, self).load_mask(image_id)
def image_reference(self, image_id):
"""Return a link to the image in the COCO Website."""
info = self.image_info[image_id]
if info["source"] == "coco":
return "http://cocodataset.org/#explore?id={}".format(info["id"])
else:
super(CocoDataset, self).image_reference(image_id)
# The following two functions are from pycocotools with a few changes.
def annToRLE(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
segm = ann['segmentation']
if isinstance(segm, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, height, width)
rle = maskUtils.merge(rles)
elif isinstance(segm['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, height, width)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann, height, width):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann, height, width)
m = maskUtils.decode(rle)
return m
############################################################
# COCO Evaluation
############################################################
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
mask = masks[:, :, i]
result = {
"image_id": image_id,
"category_id": dataset.get_source_class_id(class_id, "coco"),
"bbox": [bbox[1], bbox[0], bbox[3] - bbox[1], bbox[2] - bbox[0]],
"score": score,
"segmentation": maskUtils.encode(np.asfortranarray(mask))
}
results.append(result)
return results
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
"""Runs official COCO evaluation.
    dataset: A Dataset object with validation data
eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
limit: if not 0, it's the number of images to use for evaluation
"""
# Pick COCO images from the dataset
image_ids = image_ids or dataset.image_ids
# Limit to a subset
if limit:
image_ids = image_ids[:limit]
# Get corresponding COCO image IDs.
coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
t_prediction = 0
t_start = time.time()
results = []
for i, image_id in enumerate(image_ids):
# Load image
image = dataset.load_image(image_id)
# Run detection
t = time.time()
r = model.detect([image], verbose=0)[0]
t_prediction += (time.time() - t)
# Convert results to COCO format
# Cast masks to uint8 because COCO tools errors out on bool
image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
r["rois"], r["class_ids"],
r["scores"],
r["masks"].astype(np.uint8))
results.extend(image_results)
# Load results. This modifies results with additional attributes.
coco_results = coco.loadRes(results)
# Evaluate
cocoEval = COCOeval(coco, coco_results, eval_type)
cocoEval.params.imgIds = coco_image_ids
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Prediction time: {}. Average {}/image".format(
t_prediction, t_prediction / len(image_ids)))
print("Total time: ", time.time() - t_start)
############################################################
# Training
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on MS COCO.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'evaluate' on MS COCO")
parser.add_argument('--dataset', required=True,
metavar="/path/to/coco/",
help='Directory of the MS-COCO dataset')
parser.add_argument('--year', required=False,
default=DEFAULT_DATASET_YEAR,
metavar="<year>",
help='Year of the MS-COCO dataset (2014 or 2017) (default=2014)')
parser.add_argument('--model', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--limit', required=False,
default=500,
metavar="<image count>",
help='Images to use for evaluation (default=500)')
parser.add_argument('--download', required=False,
default=False,
metavar="<True|False>",
help='Automatically download and unzip MS-COCO files (default=False)',
type=bool)
args = parser.parse_args()
print("Command: ", args.command)
print("Model: ", args.model)
print("Dataset: ", args.dataset)
print("Year: ", args.year)
print("Logs: ", args.logs)
print("Auto Download: ", args.download)
# Configurations
if args.command == "train":
config = CocoConfig()
else:
class InferenceConfig(CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.model.lower() == "coco":
model_path = COCO_MODEL_PATH
elif args.model.lower() == "last":
# Find last trained weights
model_path = model.find_last()
elif args.model.lower() == "imagenet":
# Start from ImageNet trained weights
model_path = model.get_imagenet_weights()
else:
model_path = args.model
# Load weights
print("Loading weights ", model_path)
model.load_weights(model_path, by_name=True)
# Train or evaluate
if args.command == "train":
# Training dataset. Use the training set and 35K from the
        # validation set, as in the Mask R-CNN paper.
dataset_train = CocoDataset()
dataset_train.load_coco(args.dataset, "train", year=args.year, auto_download=args.download)
if args.year in '2014':
dataset_train.load_coco(args.dataset, "valminusminival", year=args.year, auto_download=args.download)
dataset_train.prepare()
# Validation dataset
dataset_val = CocoDataset()
val_type = "val" if args.year in '2017' else "minival"
dataset_val.load_coco(args.dataset, val_type, year=args.year, auto_download=args.download)
dataset_val.prepare()
# Image Augmentation
# Right/Left flip 50% of the time
augmentation = imgaug.augmenters.Fliplr(0.5)
# *** This training schedule is an example. Update to your needs ***
# Training - Stage 1
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=20,
layers='heads',
augmentation=augmentation)
# Training - Stage 2
# Finetune layers from ResNet stage 4 and up
print("Fine tune Resnet stage 4 and up")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=60,
layers='4+',
augmentation=augmentation)
# Training - Stage 3
# Fine tune all layers
print("Fine tune all layers")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE / 10,
epochs=80,
layers='all',
augmentation=augmentation)
elif args.command == "evaluate":
# Validation dataset
dataset_val = CocoDataset()
val_type = "val" if args.year in '2017' else "minival"
coco = dataset_val.load_coco(args.dataset, val_type, year=args.year, return_coco=True, auto_download=args.download)
dataset_val.prepare()
print("Running COCO evaluation on {} images.".format(args.limit))
evaluate_coco(model, dataset_val, coco, "bbox", limit=int(args.limit))
else:
print("'{}' is not recognized. "
"Use 'train' or 'evaluate'".format(args.command))
|
the-stack_0_18600 | #103
# Time: O(n)
# Space: O(n)
# Given a binary tree, return the zigzag level order traversal of
# its nodes' values. (ie, from left to right, then right to left
# for the next level and alternate between).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its zigzag level order traversal as:
# [
# [3],
# [20,9],
# [15,7]
# ]
class TreeNode():
def __init__(self,val):
        self.val = val
self.right=None
self.left=None
class BFSSol():
def zigzagLevelOrderTraversalBT(self,root):
level,next_level_node,zigzag_traversal=1,[root],[]
while next_level_node:
cur_level_node,cur_level_val=next_level_node,[]
next_level_node=[]
for cur_node in cur_level_node:
cur_level_val.append(cur_node.val)
if cur_node.left:
next_level_node.append(cur_node.left)
if cur_node.right:
next_level_node.append(cur_node.right)
if level%2:
zigzag_traversal.append(cur_level_val)
else:
zigzag_traversal.append(cur_level_val[::-1])
level+=1
return zigzag_traversal
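# A small, self-contained check (not part of the original snippet): it builds the
# example tree [3,9,20,null,null,15,7] from the comment above and prints the traversal.
if __name__ == '__main__':
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(BFSSol().zigzagLevelOrderTraversalBT(root))  # expected: [[3], [20, 9], [15, 7]]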
|
the-stack_0_18603 | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.views.generic import TemplateView, RedirectView
from rest_framework.authtoken.views import obtain_auth_token
from django.contrib.flatpages.views import flatpage
from backend.users.admin import constellation_admin as cl8_admin
from backend.users.views import sample_csv_template
urlpatterns = [
# serve the vue template instead of the default home
path("", TemplateView.as_view(template_name="pages/vue.html"), name="home"),
# Django Admin, use {% url 'admin:index' %}
path("admin/", cl8_admin.urls),
path("advanced-admin/", admin.site.urls),
path(
"admin/import-csv/sample.csv", sample_csv_template, name="sample-csv-template"
),
# User management
path("users/", include("backend.users.urls", namespace="users")),
path("accounts/", include("allauth.urls")),
path("about/", flatpage, {"url": "/about/"}, name="about"),
path("privacy/", flatpage, {"url": "/privacy/"}, name="privacy"),
# Your stuff: custom urls includes go here
path(
"favicon.ico", RedirectView.as_view(url="/static/images/favicons/favicon.ico")
),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# API URLS
urlpatterns += [
# API base url
path("api/", include("config.api_router")),
# DRF auth token
path("auth-token/", obtain_auth_token),
path("", include("backend.users.api.passwordless_urls")),
]
# + [
# path('', TemplateView.as_view(template_name="pages/vue.html")),
# path('<path:resource>', TemplateView.as_view(template_name="pages/vue.html"))
# ]
if settings.DEBUG:
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
path(
"400/",
default_views.bad_request,
kwargs={"exception": Exception("Bad Request!")},
),
path(
"403/",
default_views.permission_denied,
kwargs={"exception": Exception("Permission Denied")},
),
path(
"404/",
default_views.page_not_found,
kwargs={"exception": Exception("Page not Found")},
),
path("500/", default_views.server_error),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
|
the-stack_0_18604 | import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='bordercolor',
parent_name='scattergl.hoverlabel',
**kwargs
):
super(BordercolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop('array_ok', True),
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'style'),
**kwargs
)
|
the-stack_0_18605 | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import os
import tempfile
import time
import deprecation
from lxml import etree, objectify
from pm4py.meta import VERSION
from pm4py.objects.petri.common import final_marking
from pm4py.objects.petri.obj import PetriNet, Marking
from pm4py.objects.petri.utils import add_arc_from_to
from pm4py.objects.random_variables.random_variable import RandomVariable
from pm4py.util import constants
@deprecation.deprecated(deprecated_in="2.1.1", removed_in="3.0",
current_version=VERSION,
details="Use the entrypoint import_from_string method")
def import_petri_from_string(petri_string, parameters=None):
"""
Import a Petri net from a string
Parameters
----------
petri_string
Petri net expressed as PNML string
parameters
Other parameters of the algorithm
"""
if parameters is None:
parameters = {}
fp = tempfile.NamedTemporaryFile(suffix='.pnml')
fp.close()
if type(petri_string) is bytes:
with open(fp.name, 'wb') as f:
f.write(petri_string)
else:
with open(fp.name, 'w') as f:
f.write(petri_string)
net, initial_marking, this_final_marking = import_net(fp.name, parameters=parameters)
os.remove(fp.name)
return net, initial_marking, this_final_marking
def import_net(input_file_path, parameters=None):
"""
Import a Petri net from a PNML file
Parameters
----------
input_file_path
Input file path
parameters
Other parameters of the algorithm
Returns
-----------
net
Petri net
im
Initial marking
fm
Final marking
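    Example
    -----------
    A minimal sketch (the file path is an illustrative assumption):
        net, im, fm = import_net("running-example.pnml")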
"""
if parameters is None:
parameters = {}
parser = etree.XMLParser(remove_comments=True)
tree = objectify.parse(input_file_path, parser=parser)
root = tree.getroot()
return import_net_from_xml_object(root, parameters=parameters)
def import_net_from_string(petri_string, parameters=None):
"""
Imports a Petri net from a string
Parameters
-------------
petri_string
(Binary) string representing the Petri net
parameters
Parameters of the algorithm
Returns
-----------
net
Petri net
im
Initial marking
fm
Final marking
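    Example
    -----------
    A minimal sketch (the file path is an illustrative assumption):
        with open("running-example.pnml", "rb") as f:
            net, im, fm = import_net_from_string(f.read())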
"""
if parameters is None:
parameters = {}
if type(petri_string) is str:
petri_string = petri_string.encode(constants.DEFAULT_ENCODING)
parser = etree.XMLParser(remove_comments=True)
root = objectify.fromstring(petri_string, parser=parser)
return import_net_from_xml_object(root, parameters=parameters)
def import_net_from_xml_object(root, parameters=None):
"""
Import a Petri net from an etree XML object
Parameters
----------
root
Root object of the XML
parameters
Other parameters of the algorithm
"""
if parameters is None:
parameters = {}
net = PetriNet('imported_' + str(time.time()))
marking = Marking()
fmarking = Marking()
nett = None
page = None
finalmarkings = None
stochastic_information = {}
for child in root:
nett = child
places_dict = {}
trans_dict = {}
if nett is not None:
for child in nett:
if "page" in child.tag:
page = child
if "finalmarkings" in child.tag:
finalmarkings = child
if page is None:
page = nett
if page is not None:
for child in page:
if "place" in child.tag:
position_X = None
position_Y = None
dimension_X = None
dimension_Y = None
place_id = child.get("id")
place_name = place_id
number = 0
for child2 in child:
if child2.tag.endswith('name'):
for child3 in child2:
if child3.text:
place_name = child3.text
if child2.tag.endswith('initialMarking'):
for child3 in child2:
if child3.tag.endswith("text"):
number = int(child3.text)
if child2.tag.endswith('graphics'):
for child3 in child2:
if child3.tag.endswith('position'):
position_X = float(child3.get("x"))
position_Y = float(child3.get("y"))
elif child3.tag.endswith("dimension"):
dimension_X = float(child3.get("x"))
dimension_Y = float(child3.get("y"))
places_dict[place_id] = PetriNet.Place(place_id)
places_dict[place_id].properties[constants.PLACE_NAME_TAG] = place_name
net.places.add(places_dict[place_id])
if position_X is not None and position_Y is not None and dimension_X is not None and dimension_Y is not None:
places_dict[place_id].properties[constants.LAYOUT_INFORMATION_PETRI] = (
(position_X, position_Y), (dimension_X, dimension_Y))
if number > 0:
marking[places_dict[place_id]] = number
del place_name
if page is not None:
for child in page:
if child.tag.endswith("transition"):
position_X = None
position_Y = None
dimension_X = None
dimension_Y = None
trans_id = child.get("id")
trans_name = trans_id
trans_visible = True
random_variable = None
for child2 in child:
if child2.tag.endswith("name"):
for child3 in child2:
if child3.text:
if trans_name == trans_id:
trans_name = child3.text
if child2.tag.endswith("graphics"):
for child3 in child2:
if child3.tag.endswith("position"):
position_X = float(child3.get("x"))
position_Y = float(child3.get("y"))
elif child3.tag.endswith("dimension"):
dimension_X = float(child3.get("x"))
dimension_Y = float(child3.get("y"))
if child2.tag.endswith("toolspecific"):
tool = child2.get("tool")
if "ProM" in tool:
activity = child2.get("activity")
if "invisible" in activity:
trans_visible = False
elif "StochasticPetriNet" in tool:
distribution_type = None
distribution_parameters = None
priority = None
weight = None
for child3 in child2:
key = child3.get("key")
value = child3.text
if key == "distributionType":
distribution_type = value
elif key == "distributionParameters":
distribution_parameters = value
elif key == "priority":
priority = int(value)
elif key == "weight":
weight = float(value)
random_variable = RandomVariable()
random_variable.read_from_string(distribution_type, distribution_parameters)
random_variable.set_priority(priority)
random_variable.set_weight(weight)
# 15/02/2021: the name associated in the PNML to invisible transitions was lost.
# at least save that as property.
if trans_visible:
trans_label = trans_name
else:
trans_label = None
trans_dict[trans_id] = PetriNet.Transition(trans_id, trans_label)
trans_dict[trans_id].properties[constants.TRANS_NAME_TAG] = trans_name
net.transitions.add(trans_dict[trans_id])
if random_variable is not None:
trans_dict[trans_id].properties[constants.STOCHASTIC_DISTRIBUTION] = random_variable
if position_X is not None and position_Y is not None and dimension_X is not None and dimension_Y is not None:
trans_dict[trans_id].properties[constants.LAYOUT_INFORMATION_PETRI] = (
(position_X, position_Y), (dimension_X, dimension_Y))
if page is not None:
for child in page:
if child.tag.endswith("arc"):
arc_source = child.get("source")
arc_target = child.get("target")
arc_weight = 1
for arc_child in child:
if arc_child.tag.endswith("inscription"):
for text_arcweight in arc_child:
if text_arcweight.tag.endswith("text"):
arc_weight = int(text_arcweight.text)
if arc_source in places_dict and arc_target in trans_dict:
add_arc_from_to(places_dict[arc_source], trans_dict[arc_target], net, weight=arc_weight)
elif arc_target in places_dict and arc_source in trans_dict:
add_arc_from_to(trans_dict[arc_source], places_dict[arc_target], net, weight=arc_weight)
if finalmarkings is not None:
for child in finalmarkings:
for child2 in child:
place_id = child2.get("idref")
for child3 in child2:
if child3.tag.endswith("text"):
number = int(child3.text)
if number > 0:
fmarking[places_dict[place_id]] = number
# generate the final marking in the case has not been found
if len(fmarking) == 0:
fmarking = final_marking.discover_final_marking(net)
return net, marking, fmarking
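# A minimal usage sketch (the file name is illustrative, not part of this module):
# net, im, fm = import_net("running-example.pnml")
# with open("running-example.pnml", "rb") as f:
#     net2, im2, fm2 = import_net_from_string(f.read())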
|
the-stack_0_18606 | from pathlib import Path
import rlbottraining.exercise_runner as er
from rlbot.matchconfig.conversions import read_match_config_from_file
from rlbot.matchconfig.match_config import Team, PlayerConfig
from rlbottraining.training_exercise import Playlist
from training import PlaygroundExercise
def make_match_config():
match_config = read_match_config_from_file(Path('match.cfg'))
playerConfig = PlayerConfig.bot_config(
Path(__file__).absolute().parent / 'bot.cfg', Team.BLUE)
match_config.player_configs = [
playerConfig
]
return match_config
match_config = make_match_config()
def make_default_playlist() -> Playlist:
exercises = [
PlaygroundExercise('PlaygroundExercise')
]
for exercise in exercises:
exercise.match_config = match_config
return exercises
if __name__ == "__main__":
er.run_module(Path(__file__).absolute(), reload_policy=er.ReloadPolicy.NEVER)
|
the-stack_0_18608 | from typing import List, Optional, Tuple
def get_sum_comps(numbers: List[int], sum: int) -> Optional[Tuple[int, int]]:
for num1 in numbers:
num2 = sum - num1
if num2 in numbers:
return num1, num2
return None
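# Illustrative examples: get_sum_comps([1, 2, 3, 5], 8) -> (3, 5),
# while get_sum_comps([1, 2, 3], 10) -> None because no pair adds up to the target.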
def find_contiguous_num(numbers: List[int], sample_size: int) -> Optional[Tuple[int, int]]:
for start_pos in range(len(numbers) - sample_size):
end_pos = start_pos + sample_size
sum_comps = get_sum_comps(numbers[start_pos: end_pos], numbers[end_pos])
if not sum_comps:
return end_pos, numbers[end_pos]
return None
def f1(numbers: List[int], sample_size: int) -> None:
    _, num = find_contiguous_num(numbers, sample_size)
print('f1: ', num)
def f2_v1(numbers: List[int], sample_size: int) -> None:
    pos, num = find_contiguous_num(numbers, sample_size)
for start_pos in range(pos):
for end_pos in range(start_pos+3, pos+1):
if sum(numbers[start_pos:end_pos]) == num:
sorted_val = sorted(numbers[start_pos:end_pos])
print('f2_v1:', sorted_val[0] + sorted_val[-1])
return
def f2_v2(numbers: List[int], sample_size: int) -> None:
    pos, num = find_contiguous_num(numbers, sample_size)
for i in range(3, pos):
l = pos - i
for start_pos in range(pos-l):
end_pos = start_pos + l
if sum(numbers[start_pos:end_pos]) == num:
sorted_val = sorted(numbers[start_pos:end_pos])
print('f2_v2:', sorted_val[0] + sorted_val[-1])
return
from input_loader import load_input
NUMBERS = [int(num_str.strip()) for num_str in load_input().split('\n')]
SAMPLE_SIZE = 25
f1(NUMBERS, SAMPLE_SIZE)
f2_v1(NUMBERS, SAMPLE_SIZE)
f2_v2(NUMBERS, SAMPLE_SIZE) |
the-stack_0_18610 | #!/usr/bin/env python
"""
Convert a base map from bin to json format
"""
import argparse
from modules.map.proto.map_pb2 import Map
from google.protobuf import text_format
from google.protobuf.json_format import MessageToJson
def main():
parser = argparse.ArgumentParser(
        description='Convert a base map from bin to json format')
parser.add_argument(
'-i',
'--input_file',
        help='Input base map in bin format',
type=str,
default='modules/map/data/gen/base_map.bin')
parser.add_argument(
'-o',
'--output_file',
        help='Output base map in json format',
type=str,
default='modules/map/data/gen/base_map.json')
args = vars(parser.parse_args())
input_file_name = args['input_file']
output_file_name = args['output_file']
    with open(input_file_name, 'rb') as f:
mp = Map()
mp.ParseFromString(f.read())
# Output json
    with open(output_file_name, "w") as f:
f.write(MessageToJson(mp))
if __name__ == '__main__':
main()
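# Example invocation (the script name is hypothetical; the paths are the defaults above):
#   python bin_map_to_json.py -i modules/map/data/gen/base_map.bin \
#       -o modules/map/data/gen/base_map.json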
|
the-stack_0_18611 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import ast
import json
import operator
import re
import warnings
import numpy as np
import six
import pyarrow as pa
from pyarrow.lib import _pandas_api
from pyarrow.compat import (builtin_pickle, # noqa
PY2, zip_longest, Sequence, u_utf8)
_logical_type_map = {}
def get_logical_type_map():
global _logical_type_map
if not _logical_type_map:
_logical_type_map.update({
pa.lib.Type_NA: 'empty',
pa.lib.Type_BOOL: 'bool',
pa.lib.Type_INT8: 'int8',
pa.lib.Type_INT16: 'int16',
pa.lib.Type_INT32: 'int32',
pa.lib.Type_INT64: 'int64',
pa.lib.Type_UINT8: 'uint8',
pa.lib.Type_UINT16: 'uint16',
pa.lib.Type_UINT32: 'uint32',
pa.lib.Type_UINT64: 'uint64',
pa.lib.Type_HALF_FLOAT: 'float16',
pa.lib.Type_FLOAT: 'float32',
pa.lib.Type_DOUBLE: 'float64',
pa.lib.Type_DATE32: 'date',
pa.lib.Type_DATE64: 'date',
pa.lib.Type_TIME32: 'time',
pa.lib.Type_TIME64: 'time',
pa.lib.Type_BINARY: 'bytes',
pa.lib.Type_FIXED_SIZE_BINARY: 'bytes',
pa.lib.Type_STRING: 'unicode',
})
return _logical_type_map
def get_logical_type(arrow_type):
logical_type_map = get_logical_type_map()
try:
return logical_type_map[arrow_type.id]
except KeyError:
if isinstance(arrow_type, pa.lib.DictionaryType):
return 'categorical'
elif isinstance(arrow_type, pa.lib.ListType):
return 'list[{}]'.format(get_logical_type(arrow_type.value_type))
elif isinstance(arrow_type, pa.lib.TimestampType):
return 'datetimetz' if arrow_type.tz is not None else 'datetime'
elif isinstance(arrow_type, pa.lib.Decimal128Type):
return 'decimal'
return 'object'
_numpy_logical_type_map = {
np.bool_: 'bool',
np.int8: 'int8',
np.int16: 'int16',
np.int32: 'int32',
np.int64: 'int64',
np.uint8: 'uint8',
np.uint16: 'uint16',
np.uint32: 'uint32',
np.uint64: 'uint64',
np.float32: 'float32',
np.float64: 'float64',
'datetime64[D]': 'date',
np.unicode_: 'string' if not PY2 else 'unicode',
np.bytes_: 'bytes' if not PY2 else 'string',
}
def get_logical_type_from_numpy(pandas_collection):
try:
return _numpy_logical_type_map[pandas_collection.dtype.type]
except KeyError:
if hasattr(pandas_collection.dtype, 'tz'):
return 'datetimetz'
# See https://github.com/pandas-dev/pandas/issues/24739
if str(pandas_collection.dtype) == 'datetime64[ns]':
return 'datetime64[ns]'
result = _pandas_api.infer_dtype(pandas_collection)
if result == 'string':
return 'bytes' if PY2 else 'unicode'
return result
def get_extension_dtype_info(column):
dtype = column.dtype
if str(dtype) == 'category':
cats = getattr(column, 'cat', column)
assert cats is not None
metadata = {
'num_categories': len(cats.categories),
'ordered': cats.ordered,
}
physical_dtype = str(cats.codes.dtype)
elif hasattr(dtype, 'tz'):
metadata = {'timezone': pa.lib.tzinfo_to_string(dtype.tz)}
physical_dtype = 'datetime64[ns]'
else:
metadata = None
physical_dtype = str(dtype)
return physical_dtype, metadata
def get_column_metadata(column, name, arrow_type, field_name):
"""Construct the metadata for a given column
Parameters
----------
column : pandas.Series or pandas.Index
name : str
arrow_type : pyarrow.DataType
field_name : str
Equivalent to `name` when `column` is a `Series`, otherwise if `column`
is a pandas Index then `field_name` will not be the same as `name`.
This is the name of the field in the arrow Table's schema.
Returns
-------
dict
"""
logical_type = get_logical_type(arrow_type)
string_dtype, extra_metadata = get_extension_dtype_info(column)
if logical_type == 'decimal':
extra_metadata = {
'precision': arrow_type.precision,
'scale': arrow_type.scale,
}
string_dtype = 'object'
if name is not None and not isinstance(name, six.string_types):
raise TypeError(
'Column name must be a string. Got column {} of type {}'.format(
name, type(name).__name__
)
)
assert field_name is None or isinstance(field_name, six.string_types), \
str(type(field_name))
return {
'name': name,
'field_name': 'None' if field_name is None else field_name,
'pandas_type': logical_type,
'numpy_type': string_dtype,
'metadata': extra_metadata,
}
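# A minimal sketch of the resulting metadata for a plain int64 Series
# (illustrative values; assumes pandas is imported as pd):
# >>> get_column_metadata(pd.Series([1, 2, 3], name='a'), name='a',
# ...                     arrow_type=pa.int64(), field_name='a')
# {'name': 'a', 'field_name': 'a', 'pandas_type': 'int64',
#  'numpy_type': 'int64', 'metadata': None}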
def construct_metadata(df, column_names, index_levels, index_descriptors,
preserve_index, types):
"""Returns a dictionary containing enough metadata to reconstruct a pandas
DataFrame as an Arrow Table, including index columns.
Parameters
----------
df : pandas.DataFrame
index_levels : List[pd.Index]
index_descriptors : List[Dict]
preserve_index : bool
types : List[pyarrow.DataType]
Returns
-------
dict
"""
num_serialized_index_levels = len([descr for descr in index_descriptors
if not isinstance(descr, dict)])
# Use ntypes instead of Python shorthand notation [:-len(x)] as [:-0]
# behaves differently to what we want.
ntypes = len(types)
df_types = types[:ntypes - num_serialized_index_levels]
index_types = types[ntypes - num_serialized_index_levels:]
column_metadata = []
for col_name, sanitized_name, arrow_type in zip(df.columns, column_names,
df_types):
metadata = get_column_metadata(df[col_name], name=sanitized_name,
arrow_type=arrow_type,
field_name=sanitized_name)
column_metadata.append(metadata)
index_column_metadata = []
if preserve_index is not False:
for level, arrow_type, descriptor in zip(index_levels, index_types,
index_descriptors):
if isinstance(descriptor, dict):
# The index is represented in a non-serialized fashion,
# e.g. RangeIndex
continue
metadata = get_column_metadata(level, name=level.name,
arrow_type=arrow_type,
field_name=descriptor)
index_column_metadata.append(metadata)
column_indexes = []
for level in getattr(df.columns, 'levels', [df.columns]):
metadata = _get_simple_index_descriptor(level)
column_indexes.append(metadata)
else:
index_descriptors = index_column_metadata = column_indexes = []
return {
b'pandas': json.dumps({
'index_columns': index_descriptors,
'column_indexes': column_indexes,
'columns': column_metadata + index_column_metadata,
'creator': {
'library': 'pyarrow',
'version': pa.__version__
},
'pandas_version': _pandas_api.version
}).encode('utf8')
}
def _get_simple_index_descriptor(level):
string_dtype, extra_metadata = get_extension_dtype_info(level)
pandas_type = get_logical_type_from_numpy(level)
if 'mixed' in pandas_type:
warnings.warn(
"The DataFrame has column names of mixed type. They will be "
"converted to strings and not roundtrip correctly.",
UserWarning, stacklevel=4)
if pandas_type == 'unicode':
assert not extra_metadata
extra_metadata = {'encoding': 'UTF-8'}
return {
'name': level.name,
'field_name': level.name,
'pandas_type': pandas_type,
'numpy_type': string_dtype,
'metadata': extra_metadata,
}
def _column_name_to_strings(name):
"""Convert a column name (or level) to either a string or a recursive
collection of strings.
Parameters
----------
name : str or tuple
Returns
-------
value : str or tuple
Examples
--------
>>> name = 'foo'
>>> _column_name_to_strings(name)
'foo'
>>> name = ('foo', 'bar')
>>> _column_name_to_strings(name)
('foo', 'bar')
>>> import pandas as pd
>>> name = (1, pd.Timestamp('2017-02-01 00:00:00'))
>>> _column_name_to_strings(name)
('1', '2017-02-01 00:00:00')
"""
if isinstance(name, six.string_types):
return name
elif isinstance(name, six.binary_type):
# XXX: should we assume that bytes in Python 3 are UTF-8?
return name.decode('utf8')
elif isinstance(name, tuple):
return str(tuple(map(_column_name_to_strings, name)))
elif isinstance(name, Sequence):
raise TypeError("Unsupported type for MultiIndex level")
elif name is None:
return None
return str(name)
def _index_level_name(index, i, column_names):
"""Return the name of an index level or a default name if `index.name` is
None or is already a column name.
Parameters
----------
index : pandas.Index
i : int
Returns
-------
name : str
"""
if index.name is not None and index.name not in column_names:
return index.name
else:
return '__index_level_{:d}__'.format(i)
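# Illustrative examples (assuming pandas is imported as pd):
# _index_level_name(pd.Index([0, 1], name='ix'), 0, ['a']) -> 'ix'
# _index_level_name(pd.Index([0, 1]), 0, ['a']) -> '__index_level_0__'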
def _get_columns_to_convert(df, schema, preserve_index, columns):
columns = _resolve_columns_of_interest(df, schema, columns)
if not df.columns.is_unique:
raise ValueError(
'Duplicate column names found: {}'.format(list(df.columns))
)
if schema is not None:
return _get_columns_to_convert_given_schema(df, schema, preserve_index)
column_names = []
index_levels = (
_get_index_level_values(df.index) if preserve_index is not False
else []
)
columns_to_convert = []
convert_fields = []
for name in columns:
col = df[name]
name = _column_name_to_strings(name)
if _pandas_api.is_sparse(col):
raise TypeError(
"Sparse pandas data (column {}) not supported.".format(name))
columns_to_convert.append(col)
convert_fields.append(None)
column_names.append(name)
index_descriptors = []
index_column_names = []
for i, index_level in enumerate(index_levels):
name = _index_level_name(index_level, i, column_names)
if (isinstance(index_level, _pandas_api.pd.RangeIndex)
and preserve_index is None):
descr = _get_range_index_descriptor(index_level)
else:
columns_to_convert.append(index_level)
convert_fields.append(None)
descr = name
index_column_names.append(name)
index_descriptors.append(descr)
all_names = column_names + index_column_names
# all_names : all of the columns in the resulting table including the data
# columns and serialized index columns
# column_names : the names of the data columns
# index_column_names : the names of the serialized index columns
# index_descriptors : descriptions of each index to be used for
# reconstruction
# index_levels : the extracted index level values
# columns_to_convert : assembled raw data (both data columns and indexes)
# to be converted to Arrow format
# columns_fields : specified column to use for coercion / casting
# during serialization, if a Schema was provided
return (all_names, column_names, index_column_names, index_descriptors,
index_levels, columns_to_convert, convert_fields)
def _get_columns_to_convert_given_schema(df, schema, preserve_index):
"""
Specialized version of _get_columns_to_convert in case a Schema is
specified.
In that case, the Schema is used as the single point of truth for the
table structure (types, which columns are included, order of columns, ...).
"""
column_names = []
columns_to_convert = []
convert_fields = []
index_descriptors = []
index_column_names = []
index_levels = []
for name in schema.names:
try:
col = df[name]
is_index = False
except KeyError:
if preserve_index is not False and name in df.index.names:
col = df.index.get_level_values(name)
if (preserve_index is None and
isinstance(col, _pandas_api.pd.RangeIndex)):
raise ValueError(
"name '{}' is present in the schema, but it is a "
"RangeIndex which will not be converted as a column "
"in the Table, but saved as metadata-only not in "
"columns. Specify 'preserve_index=True' to force it "
"being added as a column, or remove it from the "
"specified schema".format(name))
is_index = True
else:
raise KeyError(
"name '{}' present in the specified schema is not found "
"in the columns or index".format(name))
name = _column_name_to_strings(name)
if _pandas_api.is_sparse(col):
raise TypeError(
"Sparse pandas data (column {}) not supported.".format(name))
field = schema.field(name)
columns_to_convert.append(col)
convert_fields.append(field)
column_names.append(name)
if is_index:
index_column_names.append(name)
index_descriptors.append(name)
index_levels.append(col)
all_names = column_names + index_column_names
return (all_names, column_names, index_column_names, index_descriptors,
index_levels, columns_to_convert, convert_fields)
def _get_range_index_descriptor(level):
# public start/stop/step attributes added in pandas 0.25.0
return {
'kind': 'range',
'name': level.name,
'start': _pandas_api.get_rangeindex_attribute(level, 'start'),
'stop': _pandas_api.get_rangeindex_attribute(level, 'stop'),
'step': _pandas_api.get_rangeindex_attribute(level, 'step')
}
def _get_index_level_values(index):
n = len(getattr(index, 'levels', [index]))
return [index.get_level_values(i) for i in range(n)]
def _resolve_columns_of_interest(df, schema, columns):
if schema is not None and columns is not None:
raise ValueError('Schema and columns arguments are mutually '
'exclusive, pass only one of them')
elif schema is not None:
columns = schema.names
elif columns is not None:
columns = [c for c in columns if c in df.columns]
else:
columns = df.columns
return columns
def dataframe_to_types(df, preserve_index, columns=None):
(all_names,
column_names,
_,
index_descriptors,
index_columns,
columns_to_convert,
_) = _get_columns_to_convert(df, None, preserve_index, columns)
types = []
# If pandas knows type, skip conversion
for c in columns_to_convert:
values = c.values
if _pandas_api.is_categorical(values):
type_ = pa.array(c, from_pandas=True).type
else:
values, type_ = get_datetimetz_type(values, c.dtype, None)
type_ = pa.lib._ndarray_to_arrow_type(values, type_)
if type_ is None:
type_ = pa.array(c, from_pandas=True).type
types.append(type_)
metadata = construct_metadata(df, column_names, index_columns,
index_descriptors, preserve_index, types)
return all_names, types, metadata
def dataframe_to_arrays(df, schema, preserve_index, nthreads=1, columns=None,
safe=True):
(all_names,
column_names,
index_column_names,
index_descriptors,
index_columns,
columns_to_convert,
convert_fields) = _get_columns_to_convert(df, schema, preserve_index,
columns)
# NOTE(wesm): If nthreads=None, then we use a heuristic to decide whether
# using a thread pool is worth it. Currently the heuristic is whether the
# nrows > 100 * ncols.
if nthreads is None:
nrows, ncols = len(df), len(df.columns)
if nrows > ncols * 100:
nthreads = pa.cpu_count()
else:
nthreads = 1
def convert_column(col, field):
if field is None:
field_nullable = True
type_ = None
else:
field_nullable = field.nullable
type_ = field.type
try:
result = pa.array(col, type=type_, from_pandas=True, safe=safe)
except (pa.ArrowInvalid,
pa.ArrowNotImplementedError,
pa.ArrowTypeError) as e:
e.args += ("Conversion failed for column {0!s} with type {1!s}"
.format(col.name, col.dtype),)
raise e
if not field_nullable and result.null_count > 0:
raise ValueError("Field {} was non-nullable but pandas column "
"had {} null values".format(str(field),
result.null_count))
return result
if nthreads == 1:
arrays = [convert_column(c, f)
for c, f in zip(columns_to_convert, convert_fields)]
else:
from concurrent import futures
with futures.ThreadPoolExecutor(nthreads) as executor:
arrays = list(executor.map(convert_column, columns_to_convert,
convert_fields))
types = [x.type for x in arrays]
if schema is None:
fields = []
for name, type_ in zip(all_names, types):
name = name if name is not None else 'None'
fields.append(pa.field(name, type_))
schema = pa.schema(fields)
metadata = construct_metadata(df, column_names, index_columns,
index_descriptors, preserve_index,
types)
schema = schema.with_metadata(metadata)
return arrays, schema
def get_datetimetz_type(values, dtype, type_):
if values.dtype.type != np.datetime64:
return values, type_
if _pandas_api.is_datetimetz(dtype) and type_ is None:
# If no user type passed, construct a tz-aware timestamp type
tz = dtype.tz
unit = dtype.unit
type_ = pa.timestamp(unit, tz)
elif type_ is None:
# Trust the NumPy dtype
type_ = pa.from_numpy_dtype(values.dtype)
return values, type_
# ----------------------------------------------------------------------
# Converting pandas.DataFrame to a dict containing only NumPy arrays or other
# objects friendly to pyarrow.serialize
def dataframe_to_serialized_dict(frame):
import pandas.core.internals as _int
block_manager = frame._data
blocks = []
axes = [ax for ax in block_manager.axes]
for block in block_manager.blocks:
values = block.values
block_data = {}
if isinstance(block, _int.DatetimeTZBlock):
block_data['timezone'] = pa.lib.tzinfo_to_string(values.tz)
if hasattr(values, 'values'):
values = values.values
elif isinstance(block, _int.CategoricalBlock):
block_data.update(dictionary=values.categories,
ordered=values.ordered)
values = values.codes
block_data.update(
placement=block.mgr_locs.as_array,
block=values
)
# If we are dealing with an object array, pickle it instead. Note that
# we do not use isinstance here because _int.CategoricalBlock is a
# subclass of _int.ObjectBlock.
if type(block) == _int.ObjectBlock:
block_data['object'] = None
block_data['block'] = builtin_pickle.dumps(
values, protocol=builtin_pickle.HIGHEST_PROTOCOL)
blocks.append(block_data)
return {
'blocks': blocks,
'axes': axes
}
def serialized_dict_to_dataframe(data):
import pandas.core.internals as _int
reconstructed_blocks = [_reconstruct_block(block)
for block in data['blocks']]
block_mgr = _int.BlockManager(reconstructed_blocks, data['axes'])
return _pandas_api.data_frame(block_mgr)
def _reconstruct_block(item):
import pandas.core.internals as _int
# Construct the individual blocks converting dictionary types to pandas
# categorical types and Timestamps-with-timezones types to the proper
# pandas Blocks
block_arr = item.get('block', None)
placement = item['placement']
if 'dictionary' in item:
cat = _pandas_api.categorical_type.from_codes(
block_arr, categories=item['dictionary'],
ordered=item['ordered'])
block = _int.make_block(cat, placement=placement,
klass=_int.CategoricalBlock)
elif 'timezone' in item:
dtype = make_datetimetz(item['timezone'])
block = _int.make_block(block_arr, placement=placement,
klass=_int.DatetimeTZBlock,
dtype=dtype)
elif 'object' in item:
block = _int.make_block(builtin_pickle.loads(block_arr),
placement=placement, klass=_int.ObjectBlock)
elif 'py_array' in item:
arr = item['py_array']
# TODO have mechanism to know a method to create a
# pandas ExtensionArray given the pyarrow type
# Now hardcode here to create a pandas IntegerArray for the example
arr = arr.chunk(0)
buflist = arr.buffers()
data = np.frombuffer(buflist[-1], dtype=arr.type.to_pandas_dtype())[
arr.offset:arr.offset + len(arr)]
bitmask = buflist[0]
if bitmask is not None:
mask = pa.BooleanArray.from_buffers(
pa.bool_(), len(arr), [None, bitmask])
mask = np.asarray(mask)
else:
mask = np.ones(len(arr), dtype=bool)
block_arr = _pandas_api.pd.arrays.IntegerArray(
data.copy(), ~mask, copy=False)
# create ExtensionBlock
block = _int.make_block(block_arr, placement=placement,
klass=_int.ExtensionBlock)
else:
block = _int.make_block(block_arr, placement=placement)
return block
def make_datetimetz(tz):
tz = pa.lib.string_to_tzinfo(tz)
return _pandas_api.datetimetz_type('ns', tz=tz)
# ----------------------------------------------------------------------
# Converting pyarrow.Table efficiently to pandas.DataFrame
def table_to_blockmanager(options, table, categories=None,
extension_columns=None, ignore_metadata=False):
from pandas.core.internals import BlockManager
all_columns = []
column_indexes = []
pandas_metadata = table.schema.pandas_metadata
if not ignore_metadata and pandas_metadata is not None:
all_columns = pandas_metadata['columns']
column_indexes = pandas_metadata.get('column_indexes', [])
index_descriptors = pandas_metadata['index_columns']
table = _add_any_metadata(table, pandas_metadata)
table, index = _reconstruct_index(table, index_descriptors,
all_columns)
else:
index = _pandas_api.pd.RangeIndex(table.num_rows)
_check_data_column_metadata_consistency(all_columns)
blocks = _table_to_blocks(options, table, categories, extension_columns)
columns = _deserialize_column_index(table, all_columns, column_indexes)
axes = [columns, index]
return BlockManager(blocks, axes)
def _check_data_column_metadata_consistency(all_columns):
# It can never be the case in a released version of pyarrow that
# c['name'] is None *and* 'field_name' is not a key in the column metadata,
# because the change to allow c['name'] to be None and the change to add
# 'field_name' are in the same release (0.8.0)
assert all(
(c['name'] is None and 'field_name' in c) or c['name'] is not None
for c in all_columns
)
def _deserialize_column_index(block_table, all_columns, column_indexes):
column_strings = [u_utf8(x) for x in block_table.column_names]
if all_columns:
columns_name_dict = {
c.get('field_name', _column_name_to_strings(c['name'])): c['name']
for c in all_columns
}
columns_values = [
columns_name_dict.get(name, name) for name in column_strings
]
else:
columns_values = column_strings
# If we're passed multiple column indexes then evaluate with
# ast.literal_eval, since the column index values show up as a list of
# tuples
to_pair = ast.literal_eval if len(column_indexes) > 1 else lambda x: (x,)
# Create the column index
# Construct the base index
if not columns_values:
columns = _pandas_api.pd.Index(columns_values)
else:
columns = _pandas_api.pd.MultiIndex.from_tuples(
list(map(to_pair, columns_values)),
names=[col_index['name'] for col_index in column_indexes] or None,
)
# if we're reconstructing the index
if len(column_indexes) > 0:
columns = _reconstruct_columns_from_metadata(columns, column_indexes)
# ARROW-1751: flatten a single level column MultiIndex for pandas 0.21.0
columns = _flatten_single_level_multiindex(columns)
return columns
def _reconstruct_index(table, index_descriptors, all_columns):
# 0. 'field_name' is the name of the column in the arrow Table
# 1. 'name' is the user-facing name of the column, that is, it came from
# pandas
# 2. 'field_name' and 'name' differ for index columns
# 3. We fall back on c['name'] for backwards compatibility
field_name_to_metadata = {
c.get('field_name', c['name']): c
for c in all_columns
}
# Build up a list of index columns and names while removing those columns
# from the original table
index_arrays = []
index_names = []
result_table = table
for descr in index_descriptors:
if isinstance(descr, six.string_types):
result_table, index_level, index_name = _extract_index_level(
table, result_table, descr, field_name_to_metadata)
if index_level is None:
# ARROW-1883: the serialized index column was not found
continue
elif descr['kind'] == 'range':
index_name = descr['name']
index_level = _pandas_api.pd.RangeIndex(descr['start'],
descr['stop'],
step=descr['step'],
name=index_name)
if len(index_level) != len(table):
# Possibly the result of munged metadata
continue
else:
raise ValueError("Unrecognized index kind: {0}"
.format(descr['kind']))
index_arrays.append(index_level)
index_names.append(index_name)
pd = _pandas_api.pd
# Reconstruct the row index
if len(index_arrays) > 1:
index = pd.MultiIndex.from_arrays(index_arrays, names=index_names)
elif len(index_arrays) == 1:
index = index_arrays[0]
if not isinstance(index, pd.Index):
# Box anything that wasn't boxed above
index = pd.Index(index, name=index_names[0])
else:
index = pd.RangeIndex(table.num_rows)
return result_table, index
def _extract_index_level(table, result_table, field_name,
field_name_to_metadata):
logical_name = field_name_to_metadata[field_name]['name']
index_name = _backwards_compatible_index_name(field_name, logical_name)
i = table.schema.get_field_index(field_name)
if i == -1:
# The serialized index column was removed by the user
return table, None, None
pd = _pandas_api.pd
col = table.column(i)
values = col.to_pandas().values
if hasattr(values, 'flags') and not values.flags.writeable:
# ARROW-1054: in pandas 0.19.2, factorize will reject
# non-writeable arrays when calling MultiIndex.from_arrays
values = values.copy()
if isinstance(col.type, pa.lib.TimestampType):
index_level = (pd.Series(values).dt.tz_localize('utc')
.dt.tz_convert(col.type.tz))
else:
index_level = pd.Series(values, dtype=values.dtype)
result_table = result_table.remove_column(
result_table.schema.get_field_index(field_name)
)
return result_table, index_level, index_name
def _backwards_compatible_index_name(raw_name, logical_name):
"""Compute the name of an index column that is compatible with older
versions of :mod:`pyarrow`.
Parameters
----------
raw_name : str
logical_name : str
Returns
-------
result : str
Notes
-----
* Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager`
"""
# Part of table_to_blockmanager
if raw_name == logical_name and _is_generated_index_name(raw_name):
return None
else:
return logical_name
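# Illustrative examples:
# _backwards_compatible_index_name('__index_level_0__', '__index_level_0__') -> None
# _backwards_compatible_index_name('__index_level_0__', 'date') -> 'date'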
def _is_generated_index_name(name):
pattern = r'^__index_level_\d+__$'
return re.match(pattern, name) is not None
_pandas_logical_type_map = {
'date': 'datetime64[D]',
'datetime': 'datetime64[ns]',
'unicode': np.unicode_,
'bytes': np.bytes_,
'string': np.str_,
'empty': np.object_,
}
def _pandas_type_to_numpy_type(pandas_type):
"""Get the numpy dtype that corresponds to a pandas type.
Parameters
----------
pandas_type : str
The result of a call to pandas.lib.infer_dtype.
Returns
-------
dtype : np.dtype
The dtype that corresponds to `pandas_type`.
"""
try:
return _pandas_logical_type_map[pandas_type]
except KeyError:
if 'mixed' in pandas_type:
# catching 'mixed', 'mixed-integer' and 'mixed-integer-float'
return np.object_
return np.dtype(pandas_type)
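# Illustrative examples:
# _pandas_type_to_numpy_type('datetime') -> 'datetime64[ns]'
# _pandas_type_to_numpy_type('mixed-integer') -> numpy.object_
# _pandas_type_to_numpy_type('float64') -> dtype('float64')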
def _get_multiindex_codes(mi):
# compat for pandas < 0.24 (MI labels renamed to codes).
if isinstance(mi, _pandas_api.pd.MultiIndex):
return mi.codes if hasattr(mi, 'codes') else mi.labels
else:
return None
def _reconstruct_columns_from_metadata(columns, column_indexes):
"""Construct a pandas MultiIndex from `columns` and column index metadata
in `column_indexes`.
Parameters
----------
columns : List[pd.Index]
The columns coming from a pyarrow.Table
column_indexes : List[Dict[str, str]]
The column index metadata deserialized from the JSON schema metadata
in a :class:`~pyarrow.Table`.
Returns
-------
result : MultiIndex
The index reconstructed using `column_indexes` metadata with levels of
the correct type.
Notes
-----
* Part of :func:`~pyarrow.pandas_compat.table_to_blockmanager`
"""
pd = _pandas_api.pd
# Get levels and labels, and provide sane defaults if the index has a
# single level to avoid if/else spaghetti.
levels = getattr(columns, 'levels', None) or [columns]
labels = _get_multiindex_codes(columns) or [
pd.RangeIndex(len(level)) for level in levels
]
# Convert each level to the dtype provided in the metadata
levels_dtypes = [
(level, col_index.get('pandas_type', str(level.dtype)))
for level, col_index in zip_longest(
levels, column_indexes, fillvalue={}
)
]
new_levels = []
encoder = operator.methodcaller('encode', 'UTF-8')
for level, pandas_dtype in levels_dtypes:
dtype = _pandas_type_to_numpy_type(pandas_dtype)
# Since our metadata is UTF-8 encoded, Python turns things that were
# bytes into unicode strings when json.loads-ing them. We need to
# convert them back to bytes to preserve metadata.
if dtype == np.bytes_:
level = level.map(encoder)
elif level.dtype != dtype:
level = level.astype(dtype)
new_levels.append(level)
return pd.MultiIndex(new_levels, labels, names=columns.names)
def _table_to_blocks(options, block_table, categories, extension_columns):
# Part of table_to_blockmanager
# Convert an arrow table to Block from the internal pandas API
result = pa.lib.table_to_blocks(options, block_table, categories,
extension_columns)
# Defined above
return [_reconstruct_block(item) for item in result]
def _flatten_single_level_multiindex(index):
pd = _pandas_api.pd
if isinstance(index, pd.MultiIndex) and index.nlevels == 1:
levels, = index.levels
labels, = _get_multiindex_codes(index)
# Cheaply check that we do not somehow have duplicate column names
if not index.is_unique:
raise ValueError('Found non-unique column index')
return pd.Index([levels[_label] if _label != -1 else None
for _label in labels],
name=index.names[0])
return index
def _add_any_metadata(table, pandas_metadata):
modified_columns = {}
modified_fields = {}
schema = table.schema
index_columns = pandas_metadata['index_columns']
# only take index columns into account if they are an actual table column
index_columns = [idx_col for idx_col in index_columns
if isinstance(idx_col, six.string_types)]
n_index_levels = len(index_columns)
n_columns = len(pandas_metadata['columns']) - n_index_levels
# Add time zones
for i, col_meta in enumerate(pandas_metadata['columns']):
raw_name = col_meta.get('field_name')
if not raw_name:
# deal with metadata written with arrow < 0.8 or fastparquet
raw_name = col_meta['name']
if i >= n_columns:
# index columns
raw_name = index_columns[i - n_columns]
if raw_name is None:
raw_name = 'None'
idx = schema.get_field_index(raw_name)
if idx != -1:
if col_meta['pandas_type'] == 'datetimetz':
col = table[idx]
converted = col.to_pandas()
tz = col_meta['metadata']['timezone']
tz_aware_type = pa.timestamp('ns', tz=tz)
with_metadata = pa.Array.from_pandas(converted,
type=tz_aware_type)
modified_fields[idx] = pa.field(schema[idx].name,
tz_aware_type)
modified_columns[idx] = with_metadata
if len(modified_columns) > 0:
columns = []
fields = []
for i in range(len(table.schema)):
if i in modified_columns:
columns.append(modified_columns[i])
fields.append(modified_fields[i])
else:
columns.append(table[i])
fields.append(table.schema[i])
return pa.Table.from_arrays(columns, schema=pa.schema(fields))
else:
return table
# ----------------------------------------------------------------------
# Helper functions used in lib
def make_tz_aware(series, tz):
"""
Make a datetime64 Series timezone-aware for the given tz
"""
tz = pa.lib.string_to_tzinfo(tz)
series = (series.dt.tz_localize('utc')
.dt.tz_convert(tz))
return series
|
the-stack_0_18613 | from aip import AipOcr
""" Your APP ID, API key and secret key """
APP_ID = '11156578'
API_KEY = '3K73kH6H4aGoZbUrE1N0oTO5'
SECRET_KEY = 'YoL5g6BCnWG4mQvEo0TjyDPozlySdDRp'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
""" Read the image file """
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
image = get_file_content('c:/1.png')
""" Call web-image OCR with a local image """
client.webImage(image)
""" Optional parameters """
options = {}
options["detect_direction"] = "true"
options["detect_language"] = "true"
""" Call web-image OCR with options, using a local image """
result1 = client.webImage(image, options)
print(result1)
url = "https://www.x.com/sample.jpg"
""" Call web-image OCR with a remote image URL """
client.webImageUrl(url)
""" Optional parameters """
options = {}
options["detect_direction"] = "true"
options["detect_language"] = "true"
""" Call web-image OCR with options, using a remote image URL """
client.webImageUrl(url, options)
|
the-stack_0_18615 | from django.urls import path
from trojsten.special.plugin_prask_8_1_1 import views
app_name = "plugin_zergbot"
urlpatterns = [
path("", views.index, name="root"),
path("levels/", views.levels),
path("levels/s<int:sid>l<int:lid>/", views.level),
path("solutions/s<int:sid>l<int:lid>/", views.solution),
]
|
the-stack_0_18616 | import math
import numpy as np
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
from neuraxle.api import DeepLearningPipeline
from neuraxle.metaopt.deprecated import RandomSearch
from neuraxle.steps.sklearn import SKLearnWrapper
N_ITER = 1
TIMESTEPS = 10
VALIDATION_SIZE = 0.1
BATCH_SIZE = 32
N_EPOCHS = 15
DATA_INPUTS_PAST_SHAPE = (BATCH_SIZE, TIMESTEPS)
def test_deep_learning_pipeline():
# Given
data_inputs, expected_outputs = create_2d_data()
p = DeepLearningPipeline(
SKLearnWrapper(linear_model.LinearRegression()),
validation_size=VALIDATION_SIZE,
batch_size=BATCH_SIZE,
batch_metrics={'mse': to_numpy_metric_wrapper(mean_squared_error)},
shuffle_in_each_epoch_at_train=True,
n_epochs=N_EPOCHS,
epochs_metrics={'mse': to_numpy_metric_wrapper(mean_squared_error)},
scoring_function=to_numpy_metric_wrapper(mean_squared_error),
)
# When
p, outputs = p.fit_transform(data_inputs, expected_outputs)
metrics = p.apply('get_metrics')
# Then
batch_mse_train = metrics['DeepLearningPipeline__EpochRepeater__validation_split_wrapper__epoch_metrics']['train']['mse']
epoch_mse_train = metrics['DeepLearningPipeline__EpochRepeater__validation_split_wrapper__epoch_metrics__TrainShuffled__MiniBatchSequentialPipeline__batch_metrics']['train']['mse']
epoch_mse_validation = metrics['DeepLearningPipeline__EpochRepeater__validation_split_wrapper__epoch_metrics']['validation']['mse']
assert len(epoch_mse_train) == N_EPOCHS
assert len(epoch_mse_validation) == N_EPOCHS
expected_len_batch_mse = math.ceil((len(data_inputs) / BATCH_SIZE) * (1 - VALIDATION_SIZE)) * N_EPOCHS
assert len(batch_mse_train) == expected_len_batch_mse
def test_deep_learning_pipeline_with_random_search():
# Given
data_inputs, expected_outputs = create_2d_data()
p = RandomSearch(DeepLearningPipeline(
SKLearnWrapper(linear_model.LinearRegression()),
batch_size=BATCH_SIZE,
batch_metrics={'mse': to_numpy_metric_wrapper(mean_squared_error)},
shuffle_in_each_epoch_at_train=True,
n_epochs=N_EPOCHS,
epochs_metrics={'mse': to_numpy_metric_wrapper(mean_squared_error)},
scoring_function=to_numpy_metric_wrapper(mean_squared_error),
validation_size=0.15
), n_iter=N_ITER)
# When
p, outputs = p.fit_transform(data_inputs, expected_outputs)
best_model = p.get_best_model()
best_model.set_train(False)
best_model.apply('disable_metrics')
# Then
outputs = best_model.transform(data_inputs)
mse = ((outputs - expected_outputs) ** 2).mean()
assert mse < 2
def create_2d_data():
i = 0
data_inputs = []
for batch_index in range(BATCH_SIZE):
batch = []
for _ in range(TIMESTEPS):
batch.append(i)
i += 1
data_inputs.append(batch)
data_inputs = np.array(data_inputs)
random_noise = np.random.random(DATA_INPUTS_PAST_SHAPE)
expected_outputs = 3 * data_inputs + 4 * random_noise
expected_outputs = expected_outputs.astype(np.float32)
data_inputs = data_inputs.astype(np.float32)
return data_inputs, expected_outputs
def to_numpy_metric_wrapper(metric_fun):
def metric(data_inputs, expected_outputs):
return metric_fun(np.array(data_inputs), np.array(expected_outputs))
return metric
|
the-stack_0_18617 | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DQN agent network components and implementation."""
# pylint: disable=g-bad-import-order
import typing
from typing import Any, Callable, Tuple, Union
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
Network = hk.Transformed
Params = hk.Params
NetworkFn = Callable[..., Any]
class QNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
class MultiHeadQNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
multi_head_output: jnp.ndarray
random_head_q_value: jnp.ndarray
class IqnInputs(typing.NamedTuple):
state: jnp.ndarray
taus: jnp.ndarray
class IqnOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_dist: jnp.ndarray
class QRNetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_dist: jnp.ndarray
class C51NetworkOutputs(typing.NamedTuple):
q_values: jnp.ndarray
q_logits: jnp.ndarray
def _dqn_default_initializer(
num_input_units: int) -> hk.initializers.Initializer:
"""Default initialization scheme inherited from past implementations of DQN.
This scheme was historically used to initialize all weights and biases
in convolutional and linear layers of DQN-type agents' networks.
It initializes each weight as an independent uniform sample from [`-c`, `c`],
where `c = 1 / np.sqrt(num_input_units)`, and `num_input_units` is the number
of input units affecting a single output unit in the given layer, i.e. the
total number of inputs in the case of linear (dense) layers, and
`num_input_channels * kernel_width * kernel_height` in the case of
convolutional layers.
Args:
num_input_units: number of input units to a single output unit of the layer.
Returns:
Haiku weight initializer.
"""
max_val = np.sqrt(1 / num_input_units)
return hk.initializers.RandomUniform(-max_val, max_val)
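# For example, a dense layer with 512 input units is initialised from U(-c, c)
# with c = 1 / sqrt(512) ~= 0.044 (illustrative numbers).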
def conv(
num_features: int,
kernel_shape: Union[int, Tuple[int, int]],
stride: Union[int, Tuple[int, int]],
) -> NetworkFn:
"""Convolutional layer with DQN's legacy weight initialization scheme."""
def net_fn(inputs):
"""Function representing conv layer with DQN's legacy initialization."""
num_input_units = inputs.shape[-1] * kernel_shape[0] * kernel_shape[1]
initializer = _dqn_default_initializer(num_input_units)
layer = hk.Conv2D(
num_features,
kernel_shape=kernel_shape,
stride=stride,
w_init=initializer,
b_init=initializer,
padding='VALID')
return layer(inputs)
return net_fn
def linear(num_outputs: int, with_bias=True) -> NetworkFn:
"""Linear layer with DQN's legacy weight initialization scheme."""
def net_fn(inputs):
"""Function representing linear layer with DQN's legacy initialization."""
initializer = _dqn_default_initializer(inputs.shape[-1])
layer = hk.Linear(
num_outputs,
with_bias=with_bias,
w_init=initializer,
b_init=initializer)
return layer(inputs)
return net_fn
def linear_with_shared_bias(num_outputs: int) -> NetworkFn:
"""Linear layer with single shared bias instead of one bias per output."""
def layer_fn(inputs):
"""Function representing a linear layer with single shared bias."""
initializer = _dqn_default_initializer(inputs.shape[-1])
bias_free_linear = hk.Linear(
num_outputs, with_bias=False, w_init=initializer)
linear_output = bias_free_linear(inputs)
bias = hk.get_parameter('b', [1], inputs.dtype, init=initializer)
bias = jnp.broadcast_to(bias, linear_output.shape)
return linear_output + bias
return layer_fn
def noisy_linear(num_outputs: int,
weight_init_stddev: float,
with_bias: bool = True) -> NetworkFn:
"""Linear layer with weight randomization http://arxiv.org/abs/1706.10295."""
def make_noise_sqrt(rng, shape):
noise = jax.random.truncated_normal(rng, lower=-2., upper=2., shape=shape)
return jax.lax.stop_gradient(jnp.sign(noise) * jnp.sqrt(jnp.abs(noise)))
def net_fn(inputs):
"""Function representing a linear layer with learned noise distribution."""
num_inputs = inputs.shape[-1]
mu_initializer = _dqn_default_initializer(num_inputs)
mu_layer = hk.Linear(
num_outputs,
name='mu',
with_bias=with_bias,
w_init=mu_initializer,
b_init=mu_initializer)
sigma_initializer = hk.initializers.Constant( #
weight_init_stddev / jnp.sqrt(num_inputs))
sigma_layer = hk.Linear(
num_outputs,
name='sigma',
with_bias=True,
w_init=sigma_initializer,
b_init=sigma_initializer)
# Broadcast noise over batch dimension.
input_noise_sqrt = make_noise_sqrt(hk.next_rng_key(), [1, num_inputs])
output_noise_sqrt = make_noise_sqrt(hk.next_rng_key(), [1, num_outputs])
# Factorized Gaussian noise.
mu = mu_layer(inputs)
noisy_inputs = input_noise_sqrt * inputs
sigma = sigma_layer(noisy_inputs) * output_noise_sqrt
return mu + sigma
return net_fn
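# A minimal usage sketch (shapes are illustrative). The layer draws fresh factorised
# noise via hk.next_rng_key() on every call, so `apply` needs an rng key:
#   net = hk.transform(lambda x: noisy_linear(4, 0.1)(x))
#   params = net.init(jax.random.PRNGKey(0), jnp.zeros([1, 8]))
#   out = net.apply(params, jax.random.PRNGKey(1), jnp.zeros([1, 8]))  # shape [1, 4]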
def dqn_torso() -> NetworkFn:
"""DQN convolutional torso.
Includes scaling from [`0`, `255`] (`uint8`) to [`0`, `1`] (`float32`)`.
Returns:
Network function that `haiku.transform` can be called on.
"""
def net_fn(inputs):
"""Function representing convolutional torso for a DQN Q-network."""
network = hk.Sequential([
lambda x: x.astype(jnp.float32) / 255.,
conv(32, kernel_shape=(8, 8), stride=(4, 4)),
jax.nn.relu,
conv(64, kernel_shape=(4, 4), stride=(2, 2)),
jax.nn.relu,
conv(64, kernel_shape=(3, 3), stride=(1, 1)),
jax.nn.relu,
hk.Flatten(),
])
return network(inputs)
return net_fn
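# With the standard 84x84x4 Atari frame stack this torso yields a flat feature vector of
# 64 * 7 * 7 = 3136 units per example (84 -> 20 -> 9 -> 7 under VALID padding).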
def dqn_value_head(num_actions: int, shared_bias: bool = False) -> NetworkFn:
"""Regular DQN Q-value head with single hidden layer."""
last_layer = linear_with_shared_bias if shared_bias else linear
def net_fn(inputs):
"""Function representing value head for a DQN Q-network."""
network = hk.Sequential([
linear(512),
jax.nn.relu,
last_layer(num_actions),
])
return network(inputs)
return net_fn
def rainbow_atari_network(
num_actions: int,
support: jnp.ndarray,
noisy_weight_init: float,
) -> NetworkFn:
"""Rainbow network, expects `uint8` input."""
if support.ndim != 1:
raise ValueError('support should be 1D.')
num_atoms = len(support)
support = support[None, None, :]
def net_fn(inputs):
"""Function representing Rainbow Q-network."""
inputs = dqn_torso()(inputs)
# Advantage head.
advantage = noisy_linear(512, noisy_weight_init, with_bias=True)(inputs)
advantage = jax.nn.relu(advantage)
advantage = noisy_linear(
num_actions * num_atoms, noisy_weight_init, with_bias=False)(
advantage)
advantage = jnp.reshape(advantage, (-1, num_actions, num_atoms))
# Value head.
value = noisy_linear(512, noisy_weight_init, with_bias=True)(inputs)
value = jax.nn.relu(value)
value = noisy_linear(num_atoms, noisy_weight_init, with_bias=False)(value)
value = jnp.reshape(value, (-1, 1, num_atoms))
# Q-distribution and values.
q_logits = value + advantage - jnp.mean(advantage, axis=-2, keepdims=True)
assert q_logits.shape[1:] == (num_actions, num_atoms)
q_dist = jax.nn.softmax(q_logits)
q_values = jnp.sum(q_dist * support, axis=2)
q_values = jax.lax.stop_gradient(q_values)
return C51NetworkOutputs(q_logits=q_logits, q_values=q_values)
return net_fn
def iqn_atari_network(num_actions: int, latent_dim: int) -> NetworkFn:
"""IQN network, expects `uint8` input."""
def net_fn(iqn_inputs):
"""Function representing IQN-DQN Q-network."""
state = iqn_inputs.state # batch x state_shape
taus = iqn_inputs.taus # batch x samples
# Apply DQN convnet to embed state.
state_embedding = dqn_torso()(state)
state_dim = state_embedding.shape[-1]
# Embed taus with cosine embedding + linear layer.
# cos(pi * i * tau) for i = 1,...,latents for each batch_element x sample.
# Broadcast everything to batch x samples x latent_dim.
pi_multiples = jnp.arange(1, latent_dim + 1, dtype=jnp.float32) * jnp.pi
tau_embedding = jnp.cos(pi_multiples[None, None, :] * taus[:, :, None])
# Map tau embedding onto state_dim via linear layer.
embedding_layer = linear(state_dim)
tau_embedding = hk.BatchApply(embedding_layer)(tau_embedding)
tau_embedding = jax.nn.relu(tau_embedding)
# Reshape/broadcast both embeddings to batch x num_samples x state_dim
# and multiply together, before applying value head.
head_input = tau_embedding * state_embedding[:, None, :]
value_head = dqn_value_head(num_actions)
q_dist = hk.BatchApply(value_head)(head_input)
q_values = jnp.mean(q_dist, axis=1)
q_values = jax.lax.stop_gradient(q_values)
return IqnOutputs(q_dist=q_dist, q_values=q_values)
return net_fn
def qr_atari_network(num_actions: int, quantiles: jnp.ndarray) -> NetworkFn:
"""QR-DQN network, expects `uint8` input."""
if quantiles.ndim != 1:
raise ValueError('quantiles has to be 1D.')
num_quantiles = len(quantiles)
def net_fn(inputs):
"""Function representing QR-DQN Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_quantiles * num_actions),
])
network_output = network(inputs)
q_dist = jnp.reshape(network_output, (-1, num_quantiles, num_actions))
q_values = jnp.mean(q_dist, axis=1)
q_values = jax.lax.stop_gradient(q_values)
return QRNetworkOutputs(q_dist=q_dist, q_values=q_values)
return net_fn
def c51_atari_network(num_actions: int, support: jnp.ndarray) -> NetworkFn:
"""C51 network, expects `uint8` input."""
if support.ndim != 1:
raise ValueError('support has to be 1D.')
num_atoms = len(support)
def net_fn(inputs):
"""Function representing C51 Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions * num_atoms),
])
network_output = network(inputs)
q_logits = jnp.reshape(network_output, (-1, num_actions, num_atoms))
q_dist = jax.nn.softmax(q_logits)
q_values = jnp.sum(q_dist * support[None, None, :], axis=2)
q_values = jax.lax.stop_gradient(q_values)
return C51NetworkOutputs(q_logits=q_logits, q_values=q_values)
return net_fn
def double_dqn_atari_network(num_actions: int) -> NetworkFn:
"""DQN network with shared bias in final layer, expects `uint8` input."""
def net_fn(inputs):
"""Function representing DQN Q-network with shared bias output layer."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions, shared_bias=True),
])
return QNetworkOutputs(q_values=network(inputs))
return net_fn
def dqn_atari_network(num_actions: int) -> NetworkFn:
"""DQN network, expects `uint8` input."""
def net_fn(inputs):
"""Function representing DQN Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_actions),
])
return QNetworkOutputs(q_values=network(inputs))
return net_fn
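# A minimal usage sketch (batch of one 84x84x4 uint8 frame stack; values are illustrative):
#   network = hk.transform(dqn_atari_network(num_actions=4))
#   dummy = jnp.zeros([1, 84, 84, 4], dtype=jnp.uint8)
#   params = network.init(jax.random.PRNGKey(0), dummy)
#   q_values = network.apply(params, None, dummy).q_values  # shape [1, 4]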
def bootstrapped_dqn_multi_head_network(
num_actions: int,
num_heads: int,
mask_probability: float
):
"""DQN network with multiple heads (representing ensemble)."""
binomial_probabilities = jnp.array([mask_probability, 1 - mask_probability])
def net_fn(inputs):
"""Function representing multi-head DQN Q-network."""
network = hk.Sequential([
dqn_torso(),
dqn_value_head(num_heads * num_actions),
])
network_output = network(inputs)
multi_head_output = jnp.reshape(network_output, (-1, num_heads, num_actions))
mask = jax.random.choice(key=hk.next_rng_key(), a=2, shape=(multi_head_output.shape[0], num_heads,), p=binomial_probabilities)
random_head_indices = jax.random.choice(key=hk.next_rng_key(), a=num_heads, shape=(multi_head_output.shape[0], ))
random_head_q_value = jnp.reshape(multi_head_output[:, random_head_indices], (-1, num_actions))
# TODO: make the q values (used for eval) the output of voting or weighted mean.
# Currently random head q value used as placeholder
return MultiHeadQNetworkOutputs(
q_values=random_head_q_value,
multi_head_output=multi_head_output,
random_head_q_value=random_head_q_value
)
return net_fn
|
the-stack_0_18619 | # -*- coding: utf-8 -*-
import math
from Dependencies.XLoBorg import XLoBorg
from Robot.Sensors.Real.Sensor import Sensor
class Magnetometer(Sensor):
###
# This represents the Magnetometer Sensor class.
    # The magnetometer senses magnetic fields; it is used for navigation and to determine where North is.
#
##
def __init__(self):
super(Magnetometer, self).__init__()
XLoBorg.Init()
self.updateHeading()
###
# We use this to check if the Magnetometer sensor is active or not.
#
# @raise ValueError
#
##
def _before(self):
if not self.isActive:
raise ValueError('Magnetometer Sensor is not Active')
###
# Gets the current heading of the robot.
#
# @return heading
#
##
def getHeading(self) -> float:
self._before()
self.updateHeading()
return self.heading
###
# Updates the current heading of the robot.
#
# @param int heading
#
##
def updateHeading(self, heading: int = 0):
x, y, z = XLoBorg.ReadCompassRaw()
heading = math.atan2(y, x)
heading = heading * 180 / math.pi
if heading < 0:
heading = heading + 360
self.heading = heading
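        # For example (illustrative raw readings): x=0, y=1 gives atan2(1, 0) = pi/2,
        # i.e. a heading of 90.0 degrees; negative angles are shifted into [0, 360).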
|
the-stack_0_18620 | # coding=utf-8
import unittest
from mock import patch
import sys
import logging
from mocking.IndigoDevice import IndigoDevice
from mocking.IndigoServer import Indigo
from mocking.IndigoAction import IndigoAction
indigo = Indigo()
sys.modules['indigo'] = indigo
from Devices.Bulbs.Shelly_Bulb_Duo import Shelly_Bulb_Duo
class Test_Shelly_Bulb_Duo(unittest.TestCase):
def setUp(self):
indigo.__init__()
self.device = IndigoDevice(id=123456, name="New Device")
self.shelly = Shelly_Bulb_Duo(self.device)
logging.getLogger('Plugin.ShellyMQTT').addHandler(logging.NullHandler())
self.device.pluginProps['address'] = "shellies/shelly-bulb-duo-test"
self.device.updateStateOnServer("overload", False)
self.device.updateStateOnServer("ip-address", None)
self.device.updateStateOnServer("mac-address", None)
self.device.updateStateOnServer("online", False)
self.device.updateStateOnServer("curEnergyLevel", 0)
self.device.updateStateOnServer("brightnessLevel", 0)
self.device.updateStateOnServer("whiteLevel", 100)
self.device.updateStateOnServer("whiteTemperature", 2700)
def test_getSubscriptions_no_address(self):
"""Test getting subscriptions with no address defined."""
self.device.pluginProps['address'] = None
self.assertListEqual([], self.shelly.getSubscriptions())
def test_getSubscriptions(self):
"""Test getting subscriptions with a defined address."""
topics = [
"shellies/announce",
"shellies/shelly-bulb-duo-test/online",
"shellies/shelly-bulb-duo-test/light/0/status",
"shellies/shelly-bulb-duo-test/light/0/power",
"shellies/shelly-bulb-duo-test/light/0/energy"
]
self.assertListEqual(topics, self.shelly.getSubscriptions())
def test_handleMessage_status_invalid(self):
"""Test getting invalid status data."""
        self.assertRaises(ValueError, self.shelly.handleMessage, "shellies/shelly-bulb-duo-test/light/0/status", '{"ison": true, "mo')
def test_handleMessage_light_on(self):
"""Test getting a light on message."""
self.assertTrue(self.shelly.isOff())
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/light/0/status", '{"ison": true, "white": 100, "temp": 5000, "brightness": 100}')
self.assertTrue(self.shelly.isOn())
self.assertFalse(self.shelly.device.states['overload'])
self.assertEqual(100, self.shelly.device.states['brightnessLevel'])
def test_handleMessage_light_off(self):
"""Test getting a light off message."""
self.shelly.turnOn()
self.assertTrue(self.shelly.isOn())
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/light/0/status", '{"ison": false, "white": 100, "temp": 5000, "brightness": 100}')
self.assertTrue(self.shelly.isOff())
self.assertFalse(self.shelly.device.states['overload'])
self.assertEqual(0, self.shelly.device.states['brightnessLevel'])
def test_handleMessage_overpower(self):
"""Test getting a relay overpower message."""
self.assertFalse(self.shelly.device.states['overload'])
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/overload", "1")
self.assertTrue(self.shelly.device.states['overload'])
def test_handleMessage_power(self):
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/relay/0/power", "0")
self.assertEqual("0", self.shelly.device.states['curEnergyLevel'])
self.assertEqual("0 W", self.shelly.device.states_meta['curEnergyLevel']['uiValue'])
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/relay/0/power", "101.123")
self.assertEqual("101.123", self.shelly.device.states['curEnergyLevel'])
self.assertEqual("101.123 W", self.shelly.device.states_meta['curEnergyLevel']['uiValue'])
def test_handleMessage_energy(self):
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/relay/0/energy", "0")
self.assertAlmostEqual(0.0000, self.shelly.device.states['accumEnergyTotal'], 4)
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/relay/0/energy", "50")
self.assertAlmostEqual(0.0008, self.shelly.device.states['accumEnergyTotal'], 4)
def test_handleMessage_announce(self):
announcement = '{"id": "shelly-bulb-duo-test", "mac": "aa:bb:cc:ee", "ip": "192.168.1.101", "fw_ver": "0.1.0", "new_fw": false}'
self.shelly.handleMessage("shellies/announce", announcement)
self.assertEqual("aa:bb:cc:ee", self.shelly.device.states['mac-address'])
self.assertEqual("192.168.1.101", self.shelly.getIpAddress())
self.assertEqual("0.1.0", self.shelly.getFirmware())
self.assertFalse(self.shelly.updateAvailable())
def test_handleMessage_online_true(self):
self.assertFalse(self.shelly.device.states['online'])
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/online", "true")
self.assertTrue(self.shelly.device.states['online'])
def test_handleMessage_online_false(self):
self.shelly.device.states['online'] = True
self.assertTrue(self.shelly.device.states['online'])
self.shelly.handleMessage("shellies/shelly-bulb-duo-test/online", "false")
self.assertFalse(self.shelly.device.states['online'])
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_turn_on(self, publish):
self.shelly.turnOff()
self.assertTrue(self.shelly.isOff())
turnOn = IndigoAction(indigo.kDeviceAction.TurnOn)
self.shelly.handleAction(turnOn)
self.assertTrue(self.shelly.isOn())
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 100}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_turn_off(self, publish):
self.shelly.turnOn()
self.assertTrue(self.shelly.isOn())
turnOff = IndigoAction(indigo.kDeviceAction.TurnOff)
self.shelly.handleAction(turnOff)
self.assertTrue(self.shelly.isOff())
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "off", "temp": 2700, "brightness": 0}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_status_request(self, publish):
statusRequest = IndigoAction(indigo.kDeviceAction.RequestStatus)
self.shelly.handleAction(statusRequest)
publish.assert_called_with("shellies/shelly-bulb-duo-test/command", "update")
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_toggle_off_to_on(self, publish):
self.shelly.turnOff()
self.assertTrue(self.shelly.isOff())
toggle = IndigoAction(indigo.kDeviceAction.Toggle)
self.shelly.handleAction(toggle)
self.assertTrue(self.shelly.isOn())
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 100}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_toggle_on_to_off(self, publish):
self.shelly.turnOn()
self.assertTrue(self.shelly.isOn())
toggle = IndigoAction(indigo.kDeviceAction.Toggle)
self.shelly.handleAction(toggle)
self.assertTrue(self.shelly.isOff())
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "off", "temp": 2700, "brightness": 0}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_reset_energy(self, publish):
self.shelly.updateEnergy(30)
self.assertAlmostEqual(0.0005, self.shelly.device.states['accumEnergyTotal'], 4)
resetEnergy = IndigoAction(indigo.kUniversalAction.EnergyReset)
self.shelly.handleAction(resetEnergy)
self.assertAlmostEqual(0.0000, self.shelly.device.states['accumEnergyTotal'], 4)
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_update_energy(self, publish):
updateEnergy = IndigoAction(indigo.kDeviceAction.RequestStatus)
self.shelly.handleAction(updateEnergy)
publish.assert_called_with("shellies/shelly-bulb-duo-test/command", "update")
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_setBrightness(self, publish):
self.assertEqual(0, self.shelly.device.states['brightnessLevel'])
setBrightness = IndigoAction(indigo.kDeviceAction.SetBrightness, actionValue=50)
self.shelly.handleAction(setBrightness)
self.assertTrue(self.shelly.isOn())
self.assertEqual(50, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 50}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_brightenBy(self, publish):
self.assertEqual(0, self.shelly.device.states['brightnessLevel'])
brightenBy = IndigoAction(indigo.kDeviceAction.BrightenBy, actionValue=25)
self.shelly.handleAction(brightenBy)
self.assertTrue(self.shelly.isOn())
self.assertEqual(25, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 25}')
self.shelly.handleAction(brightenBy)
self.assertTrue(self.shelly.isOn())
self.assertEqual(50, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 50}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_brightenBy_more_than_100(self, publish):
self.shelly.device.updateStateOnServer('brightnessLevel', 90)
brightenBy = IndigoAction(indigo.kDeviceAction.BrightenBy, actionValue=25)
self.shelly.handleAction(brightenBy)
self.assertTrue(self.shelly.isOn())
self.assertEqual(100, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 100}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_dimBy(self, publish):
self.shelly.device.updateStateOnServer('brightnessLevel', 100)
dimBy = IndigoAction(indigo.kDeviceAction.DimBy, actionValue=25)
self.shelly.handleAction(dimBy)
self.assertTrue(self.shelly.isOn())
self.assertEqual(75, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 75}')
self.shelly.handleAction(dimBy)
self.assertTrue(self.shelly.isOn())
self.assertEqual(50, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 2700, "brightness": 50}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_dimBy_less_than_0(self, publish):
self.shelly.device.updateStateOnServer('brightnessLevel', 10)
dimBy = IndigoAction(indigo.kDeviceAction.DimBy, actionValue=25)
self.shelly.handleAction(dimBy)
self.assertTrue(self.shelly.isOff())
self.assertEqual(0, self.shelly.device.states['brightnessLevel'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "off", "temp": 2700, "brightness": 0}')
@patch('Devices.Shelly.Shelly.publish')
def test_handleAction_setColorLevels(self, publish):
self.shelly.turnOn()
self.shelly.device.updateStateOnServer('brightnessLevel', 100)
self.shelly.device.updateStateOnServer('whiteLevel', 10)
self.shelly.device.updateStateOnServer('whiteTemperature', 10)
setColorLevels = IndigoAction(indigo.kDeviceAction.SetColorLevels, actionValue={'whiteLevel': 50, 'whiteTemperature': 6500})
self.shelly.handleAction(setColorLevels)
self.assertTrue(self.shelly.isOn())
self.assertEqual(50, self.shelly.device.states['whiteLevel'])
self.assertEqual(6500, self.shelly.device.states['whiteTemperature'])
publish.assert_called_with("shellies/shelly-bulb-duo-test/light/0/set", '{"turn": "on", "temp": 6500, "brightness": 100}')
def test_apply_brightness_off(self):
self.shelly.turnOn()
self.assertTrue(self.shelly.isOn())
self.shelly.applyBrightness(0)
self.assertTrue(self.shelly.isOff())
self.assertEqual(0, self.shelly.device.brightness)
def test_apply_brightness_on(self):
self.assertTrue(self.shelly.isOff())
self.shelly.applyBrightness(50)
self.assertTrue(self.shelly.isOn())
self.assertEqual(50, self.shelly.device.brightness)
self.shelly.applyBrightness(100)
self.assertTrue(self.shelly.isOn())
self.assertEqual(100, self.shelly.device.brightness)
def test_update_state_image_on(self):
self.shelly.turnOn()
self.assertTrue(self.shelly.isOn())
self.assertEqual(indigo.kStateImageSel.DimmerOn, self.shelly.device.image)
def test_update_state_image_off(self):
self.shelly.turnOff()
self.assertTrue(self.shelly.isOff())
self.assertEqual(indigo.kStateImageSel.DimmerOff, self.shelly.device.image)
def test_validateConfigUI(self):
values = {
"broker-id": "12345",
"address": "some/address",
"message-type": "a-type",
"announce-message-type-same-as-message-type": True
}
isValid, valuesDict, errors = Shelly_Bulb_Duo.validateConfigUI(values, None, None)
self.assertTrue(isValid)
def test_validateConfigUI_announce_message_type(self):
values = {
"broker-id": "12345",
"address": "some/address",
"message-type": "a-type",
"announce-message-type-same-as-message-type": False,
"announce-message-type": "another-type"
}
isValid, valuesDict, errors = Shelly_Bulb_Duo.validateConfigUI(values, None, None)
self.assertTrue(isValid)
def test_validateConfigUI_invalid(self):
values = {
"broker-id": "",
"address": "",
"message-type": "",
"announce-message-type-same-as-message-type": False,
"announce-message-type": ""
}
isValid, valuesDict, errors = Shelly_Bulb_Duo.validateConfigUI(values, None, None)
self.assertFalse(isValid)
self.assertTrue("broker-id" in errors)
self.assertTrue("address" in errors)
self.assertTrue("message-type" in errors)
self.assertTrue("announce-message-type" in errors) |
the-stack_0_18621 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
# dfs search
if not root:
return 0
#return max(self.maxDepth(root.left),self.maxDepth(root.right))+1
        # The recursive one-liner above is straightforward; in an interview
        # setting you may be asked to implement it iteratively with an
        # explicit stack (a more advanced traversal technique), not just
        # level by level.
"""
### simple bfs
stck=[root]
depth=0
# bfs search
while stck:
tmp=[]
depth+=1
for cur in stck:
if cur.left:
tmp.append(cur.left)
if cur.right:
tmp.append(cur.right)
stck=tmp
return depth
"""
### implement not level by level
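        # Keep the root-to-current-node path on the stack; the maximum stack
        # size seen during the walk equals the maximum depth of the tree.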
stck=[root]
depth=0
# record the previous route
prenode=None
while stck:
cur=stck[-1]
# if prenode is above cur node
if not prenode or prenode.left==cur or prenode.right==cur:
# left has priority to add, if--elif not if---if
if cur.left:
stck.append(cur.left)
elif cur.right:
stck.append(cur.right)
# else if the travel back from bottom, the left subtree is completed
elif cur.left==prenode:
if cur.right:
stck.append(cur.right)
# reach the leaf node, prenode and cur reach the same node
else:
stck.pop()
# move pre to next route
prenode=cur
depth=max(depth,len(stck))
return depth
|
the-stack_0_18622 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Retrain the YOLO model for your own dataset.
"""
import os, time, random, argparse
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler, EarlyStopping, TerminateOnNaN, LambdaCallback
from tensorflow_model_optimization.sparsity import keras as sparsity
from yolo5.model import get_yolo5_train_model
from yolo5.data import yolo5_data_generator_wrapper, Yolo5DataGenerator
from yolo3.model import get_yolo3_train_model
from yolo3.data import yolo3_data_generator_wrapper, Yolo3DataGenerator
from yolo2.model import get_yolo2_train_model
from yolo2.data import yolo2_data_generator_wrapper, Yolo2DataGenerator
from common.utils import get_classes, get_anchors, get_dataset, optimize_tf_gpu
from common.model_utils import get_optimizer
from common.callbacks import EvalCallBack, DatasetShuffleCallBack
# Try to enable Auto Mixed Precision on TF 2.0
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
os.environ['TF_AUTO_MIXED_PRECISION_GRAPH_REWRITE_IGNORE_PERFORMANCE'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
optimize_tf_gpu(tf, K)
def main(args):
annotation_file = args.annotation_file
log_dir = os.path.join('logs', '000')
classes_path = args.classes_path
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(args.anchors_path)
num_anchors = len(anchors)
# get freeze level according to CLI option
if args.weights_path:
freeze_level = 0
else:
freeze_level = 1
if args.freeze_level is not None:
freeze_level = args.freeze_level
# callbacks for training process
logging = TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=False, write_grads=False, write_images=False, update_freq='batch')
checkpoint = ModelCheckpoint(os.path.join(log_dir, 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'),
monitor='val_loss',
mode='min',
verbose=1,
save_weights_only=False,
save_best_only=True,
period=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, mode='min', patience=10, verbose=1, cooldown=0, min_lr=1e-10)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=50, verbose=1, mode='min')
terminate_on_nan = TerminateOnNaN()
callbacks=[logging, checkpoint, reduce_lr, early_stopping, terminate_on_nan]
# get train&val dataset
dataset = get_dataset(annotation_file)
if args.val_annotation_file:
val_dataset = get_dataset(args.val_annotation_file)
num_train = len(dataset)
num_val = len(val_dataset)
dataset.extend(val_dataset)
else:
val_split = args.val_split
num_val = int(len(dataset)*val_split)
num_train = len(dataset) - num_val
# assign multiscale interval
if args.multiscale:
rescale_interval = args.rescale_interval
else:
rescale_interval = -1 #Doesn't rescale
# model input shape check
input_shape = args.model_image_size
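    # the backbones downsample the input by a total stride of 32, so both
    # dimensions must be divisible by 32 for the output grids to align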
assert (input_shape[0]%32 == 0 and input_shape[1]%32 == 0), 'model_image_size should be multiples of 32'
# get different model type & train&val data generator
if args.model_type.startswith('scaled_yolo4_') or args.model_type.startswith('yolo5_'):
# Scaled-YOLOv4 & YOLOv5 entrance, use yolo5 submodule but now still yolo3 data generator
# TODO: create new yolo5 data generator to apply YOLOv5 anchor assignment
get_train_model = get_yolo5_train_model
data_generator = yolo5_data_generator_wrapper
# tf.keras.Sequence style data generator
#train_data_generator = Yolo5DataGenerator(dataset[:num_train], args.batch_size, input_shape, anchors, num_classes, args.enhance_augment, rescale_interval, args.multi_anchor_assign)
#val_data_generator = Yolo5DataGenerator(dataset[num_train:], args.batch_size, input_shape, anchors, num_classes, multi_anchor_assign=args.multi_anchor_assign)
tiny_version = False
elif args.model_type.startswith('yolo3_') or args.model_type.startswith('yolo4_'):
#if num_anchors == 9:
# YOLOv3 & v4 entrance, use 9 anchors
get_train_model = get_yolo3_train_model
data_generator = yolo3_data_generator_wrapper
# tf.keras.Sequence style data generator
#train_data_generator = Yolo3DataGenerator(dataset[:num_train], args.batch_size, input_shape, anchors, num_classes, args.enhance_augment, rescale_interval, args.multi_anchor_assign)
#val_data_generator = Yolo3DataGenerator(dataset[num_train:], args.batch_size, input_shape, anchors, num_classes, multi_anchor_assign=args.multi_anchor_assign)
tiny_version = False
elif args.model_type.startswith('tiny_yolo3_') or args.model_type.startswith('tiny_yolo4_'):
#elif num_anchors == 6:
# Tiny YOLOv3 & v4 entrance, use 6 anchors
get_train_model = get_yolo3_train_model
data_generator = yolo3_data_generator_wrapper
# tf.keras.Sequence style data generator
#train_data_generator = Yolo3DataGenerator(dataset[:num_train], args.batch_size, input_shape, anchors, num_classes, args.enhance_augment, rescale_interval, args.multi_anchor_assign)
#val_data_generator = Yolo3DataGenerator(dataset[num_train:], args.batch_size, input_shape, anchors, num_classes, multi_anchor_assign=args.multi_anchor_assign)
tiny_version = True
elif args.model_type.startswith('yolo2_') or args.model_type.startswith('tiny_yolo2_'):
#elif num_anchors == 5:
# YOLOv2 & Tiny YOLOv2 use 5 anchors
get_train_model = get_yolo2_train_model
data_generator = yolo2_data_generator_wrapper
# tf.keras.Sequence style data generator
#train_data_generator = Yolo2DataGenerator(dataset[:num_train], args.batch_size, input_shape, anchors, num_classes, args.enhance_augment, rescale_interval)
#val_data_generator = Yolo2DataGenerator(dataset[num_train:], args.batch_size, input_shape, anchors, num_classes)
tiny_version = False
else:
raise ValueError('Unsupported model type')
# prepare online evaluation callback
if args.eval_online:
eval_callback = EvalCallBack(args.model_type, dataset[num_train:], anchors, class_names, args.model_image_size, args.model_pruning, log_dir, eval_epoch_interval=args.eval_epoch_interval, save_eval_checkpoint=args.save_eval_checkpoint, elim_grid_sense=args.elim_grid_sense)
callbacks.append(eval_callback)
# prepare train/val data shuffle callback
if args.data_shuffle:
shuffle_callback = DatasetShuffleCallBack(dataset)
callbacks.append(shuffle_callback)
# prepare model pruning config
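    # total number of optimizer steps over the whole run, passed to model
    # construction as the final step of the pruning schedule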
pruning_end_step = np.ceil(1.0 * num_train / args.batch_size).astype(np.int32) * args.total_epoch
if args.model_pruning:
pruning_callbacks = [sparsity.UpdatePruningStep(), sparsity.PruningSummaries(log_dir=log_dir, profile_batch=0)]
callbacks = callbacks + pruning_callbacks
# prepare optimizer
optimizer = get_optimizer(args.optimizer, args.learning_rate, decay_type=None)
# support multi-gpu training
if args.gpu_num >= 2:
# devices_list=["/gpu:0", "/gpu:1"]
devices_list=["/gpu:{}".format(n) for n in range(args.gpu_num)]
strategy = tf.distribute.MirroredStrategy(devices=devices_list)
print ('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
# get multi-gpu train model
model = get_train_model(args.model_type, anchors, num_classes, weights_path=args.weights_path, freeze_level=freeze_level, optimizer=optimizer, label_smoothing=args.label_smoothing, elim_grid_sense=args.elim_grid_sense, model_pruning=args.model_pruning, pruning_end_step=pruning_end_step)
else:
# get normal train model
model = get_train_model(args.model_type, anchors, num_classes, weights_path=args.weights_path, freeze_level=freeze_level, optimizer=optimizer, label_smoothing=args.label_smoothing, elim_grid_sense=args.elim_grid_sense, model_pruning=args.model_pruning, pruning_end_step=pruning_end_step)
model.summary()
# Transfer training some epochs with frozen layers first if needed, to get a stable loss.
initial_epoch = args.init_epoch
epochs = initial_epoch + args.transfer_epoch
print("Transfer training stage")
print('Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'.format(num_train, num_val, args.batch_size, input_shape))
#model.fit_generator(train_data_generator,
model.fit_generator(data_generator(dataset[:num_train], args.batch_size, input_shape, anchors, num_classes, args.enhance_augment, rescale_interval, multi_anchor_assign=args.multi_anchor_assign),
steps_per_epoch=max(1, num_train//args.batch_size),
#validation_data=val_data_generator,
validation_data=data_generator(dataset[num_train:], args.batch_size, input_shape, anchors, num_classes, multi_anchor_assign=args.multi_anchor_assign),
validation_steps=max(1, num_val//args.batch_size),
epochs=epochs,
initial_epoch=initial_epoch,
#verbose=1,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
callbacks=callbacks)
# Wait 2 seconds for next stage
time.sleep(2)
if args.decay_type:
# rebuild optimizer to apply learning rate decay, only after
# unfreeze all layers
callbacks.remove(reduce_lr)
steps_per_epoch = max(1, num_train//args.batch_size)
decay_steps = steps_per_epoch * (args.total_epoch - args.init_epoch - args.transfer_epoch)
optimizer = get_optimizer(args.optimizer, args.learning_rate, decay_type=args.decay_type, decay_steps=decay_steps)
# Unfreeze the whole network for further tuning
# NOTE: more GPU memory is required after unfreezing the body
print("Unfreeze and continue training, to fine-tune.")
if args.gpu_num >= 2:
with strategy.scope():
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=optimizer, loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
else:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=optimizer, loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
print('Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'.format(num_train, num_val, args.batch_size, input_shape))
#model.fit_generator(train_data_generator,
model.fit_generator(data_generator(dataset[:num_train], args.batch_size, input_shape, anchors, num_classes, args.enhance_augment, rescale_interval, multi_anchor_assign=args.multi_anchor_assign),
steps_per_epoch=max(1, num_train//args.batch_size),
#validation_data=val_data_generator,
validation_data=data_generator(dataset[num_train:], args.batch_size, input_shape, anchors, num_classes, multi_anchor_assign=args.multi_anchor_assign),
validation_steps=max(1, num_val//args.batch_size),
epochs=args.total_epoch,
initial_epoch=epochs,
#verbose=1,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
callbacks=callbacks)
# Finally store model
if args.model_pruning:
model = sparsity.strip_pruning(model)
model.save(os.path.join(log_dir, 'trained_final.h5'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Model definition options
parser.add_argument('--model_type', type=str, required=False, default='yolo3_mobilenet_lite',
help='YOLO model type: yolo3_mobilenet_lite/tiny_yolo3_mobilenet/yolo3_darknet/..., default=%(default)s')
parser.add_argument('--anchors_path', type=str, required=False, default=os.path.join('configs', 'yolo3_anchors.txt'),
help='path to anchor definitions, default=%(default)s')
parser.add_argument('--model_image_size', type=str, required=False, default='416x416',
help = "Initial model image input size as <height>x<width>, default=%(default)s")
parser.add_argument('--weights_path', type=str, required=False, default=None,
help = "Pretrained model/weights file for fine tune")
# Data options
parser.add_argument('--annotation_file', type=str, required=False, default='trainval.txt',
help='train annotation txt file, default=%(default)s')
parser.add_argument('--val_annotation_file', type=str, required=False, default=None,
help='val annotation txt file, default=%(default)s')
parser.add_argument('--val_split', type=float, required=False, default=0.1,
help = "validation data persentage in dataset if no val dataset provide, default=%(default)s")
parser.add_argument('--classes_path', type=str, required=False, default=os.path.join('configs', 'voc_classes.txt'),
help='path to class definitions, default=%(default)s')
# Training options
parser.add_argument('--batch_size', type=int, required=False, default=16,
help = "Batch size for train, default=%(default)s")
parser.add_argument('--optimizer', type=str, required=False, default='adam', choices=['adam', 'rmsprop', 'sgd'],
help = "optimizer for training (adam/rmsprop/sgd), default=%(default)s")
parser.add_argument('--learning_rate', type=float, required=False, default=1e-3,
help = "Initial learning rate, default=%(default)s")
parser.add_argument('--decay_type', type=str, required=False, default=None, choices=[None, 'cosine', 'exponential', 'polynomial', 'piecewise_constant'],
help = "Learning rate decay type, default=%(default)s")
parser.add_argument('--transfer_epoch', type=int, required=False, default=20,
help = "Transfer training (from Imagenet) stage epochs, default=%(default)s")
parser.add_argument('--freeze_level', type=int,required=False, default=None, choices=[None, 0, 1, 2],
help = "Freeze level of the model in transfer training stage. 0:NA/1:backbone/2:only open prediction layer")
parser.add_argument('--init_epoch', type=int,required=False, default=0,
help = "Initial training epochs for fine tune training, default=%(default)s")
parser.add_argument('--total_epoch', type=int,required=False, default=250,
help = "Total training epochs, default=%(default)s")
parser.add_argument('--multiscale', default=False, action="store_true",
help='Whether to use multiscale training')
parser.add_argument('--rescale_interval', type=int, required=False, default=10,
help = "Number of iteration(batches) interval to rescale input size, default=%(default)s")
parser.add_argument('--enhance_augment', type=str, required=False, default=None, choices=[None, 'mosaic'],
help = "enhance data augmentation type (None/mosaic), default=%(default)s")
parser.add_argument('--label_smoothing', type=float, required=False, default=0,
help = "Label smoothing factor (between 0 and 1) for classification loss, default=%(default)s")
parser.add_argument('--multi_anchor_assign', default=False, action="store_true",
help = "Assign multiple anchors to single ground truth")
parser.add_argument('--elim_grid_sense', default=False, action="store_true",
help = "Eliminate grid sensitivity")
parser.add_argument('--data_shuffle', default=False, action="store_true",
help='Whether to shuffle train/val data for cross-validation')
parser.add_argument('--gpu_num', type=int, required=False, default=1,
help='Number of GPU to use, default=%(default)s')
parser.add_argument('--model_pruning', default=False, action="store_true",
help='Use model pruning for optimization, only for TF 1.x')
# Evaluation options
parser.add_argument('--eval_online', default=False, action="store_true",
help='Whether to do evaluation on validation dataset during training')
parser.add_argument('--eval_epoch_interval', type=int, required=False, default=10,
help = "Number of iteration(epochs) interval to do evaluation, default=%(default)s")
parser.add_argument('--save_eval_checkpoint', default=False, action="store_true",
help='Whether to save checkpoint with best evaluation result')
args = parser.parse_args()
height, width = args.model_image_size.split('x')
args.model_image_size = (int(height), int(width))
main(args)
|
the-stack_0_18624 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
SMALL_SIZE = 16
MEDIUM_SIZE = 20
BIGGER_SIZE = 22
plt.rc('font', size=BIGGER_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams.update({'text.color' : "black",
'axes.labelcolor' : "black"})
from matplotlib.patches import Circle, Wedge, Polygon, ArrowStyle
from matplotlib.collections import PatchCollection
from matplotlib import patches
import collections
import networkx as nx
from scipy.sparse import csr_matrix
colors = np.array(["b", "g", "r", "c", "m", "y", "k", "w"])
# Reflection (circle inversion of x through orthogonal circle centered at a)
def isometric_transform(a, x):
r2 = np.linalg.norm(a, axis=-1, keepdims=True)**2 - (1.0)
return r2 / np.linalg.norm(x - a, axis=-1, keepdims=True)**2 * (x - a) + a
# Inversion taking mu to origin
def reflect_at_zero(mu, x):
a = mu / np.linalg.norm(mu, axis=-1, keepdims=True)**2
return isometric_transform(a, x)
def hyperbolic_setup(fig, ax):
fig.set_size_inches(10.0, 10.0, forward=True)
# set axes
ax.set_ylim([-1.2, 1.2])
ax.set_xlim([-1.2, 1.2])
# draw Poincare disk boundary
e = patches.Arc((0,0), 2.0, 2.0,
linewidth=2, fill=False, zorder=2)
ax.add_patch(e)
# collinearity check. if collinear, draw a line and don't attempt curve
def collinear(a,b,c, eps=1e-4):
slope1 = (c[:,1]-b[:,1])/(c[:,0]-b[:,0])
slope2 = (a[:,1]-c[:,1])/(a[:,0]-c[:,0])
return np.logical_or(np.logical_and( np.abs(c[:,0] - b[:,0]) < eps, np.abs(c[:,0]-a[:,0]) < eps ),
np.abs(slope1 - slope2) < eps)
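# Center of the circle through three 2D points a, b, c (batched): solves the
# 2x2 linear system given by the equal-distance (perpendicular bisector)
# conditions |p-a|^2 = |p-c|^2 and |p-b|^2 = |p-c|^2 for each row.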
def get_circle_center(a,b,c):
m = np.zeros([len(a), 2,2])
m[:,0,0] = 2*(c[:,0]-a[:,0])
m[:,0,1] = 2*(c[:,1]-a[:,1])
m[:,1,0] = 2*(c[:,0]-b[:,0])
m[:,1,1] = 2*(c[:,1]-b[:,1])
v = np.zeros([len(a), 2,1])
v[:,0] = c[:,:1]**2 + c[:,1:]**2 - a[:,:1]**2 - a[:,1:]**2
v[:,1] = c[:,:1]**2 + c[:,1:]**2 - b[:,:1]**2 - b[:,1:]**2
return np.array([(np.linalg.inv(m_).dot(v_)).flatten() for m_, v_ in zip(m, v)])
# distance for Euclidean coordinates
def euclid_dist(a,b):
return np.linalg.norm(a-b, axis=-1, keepdims=False)
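# A third point on the hyperbolic geodesic through a and b: map a to the
# origin (where geodesics through the origin are straight), take the midpoint
# of the image of b, and map the result back.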
def get_third_point(a,b):
b0 = reflect_at_zero(a, b)
c0 = b0 / 2.0
c = reflect_at_zero(a, c0)
return c
# angles for arc
def get_angles(cent, a):
theta = np.rad2deg(np.arctan((a[:,1]-cent[:,1])/(a[:,0]-cent[:,0])))
quad_3_mask = np.logical_and(a[:,0]-cent[:,0] < 0, a[:,1]-cent[:,1] < 0)
quad_2_mask = np.logical_and(a[:,0]-cent[:,0] < 0, a[:,1]-cent[:,1] >= 0)
theta[quad_3_mask] += 180
theta[quad_2_mask] -= 180
theta[theta < 0] += 360
theta[np.logical_and(abs(a[:,0] - cent[:,0]) < 0.1**3, a[:,1] > cent[:,1] )] = 90
theta[np.logical_and(abs(a[:,0] - cent[:,0]) < 0.1**3, a[:,1] <= cent[:,1] )] = 270
return theta
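# Draw the Poincare-disk geodesic through a and b as a circular arc through
# a, b and the auxiliary point c; (nearly) collinear triples or tiny arcs are
# drawn as straight segments instead.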
def draw_geodesic(a, b, c, ax, c1=None, c2=None, verbose=False, width=.02):
cent = get_circle_center(a,b,c)
radius = euclid_dist(a, cent)
t1 = get_angles(cent, b)
t2 = get_angles(cent, a)
mask = np.logical_or(np.logical_and(t2 > t1, t2 - t1 < 180), np.logical_and(t1 > t2, t1 - t2 >= 180))
theta1 = np.where(mask, t1, t2)
theta2 = np.where(mask, t2, t1)
collinear_mask = collinear(a, b, c)
mask_ = np.logical_or(collinear_mask, np.abs(t1 - t2) < 10)
coordsA = "data"
coordsB = "data"
for ma_, a_, b_, c_, cent_, radius_, theta1_, theta2_ in zip(mask_, a, b, c, cent, radius, theta1, theta2):
if ma_:
e = patches.ConnectionPatch(a_, b_, coordsA, coordsB,
linewidth=width,
zorder=0,
)
else:
e = patches.Arc((cent_[0], cent_[1]), 2*radius_, 2*radius_,
theta1=theta1_, theta2=theta2_, linewidth=width, fill=False, zorder=0)
ax.add_patch(e)
def draw_graph(graph, embedding, labels, path, s=25):
assert embedding.shape[1] == 2
    if isinstance(graph, csr_matrix):
        # sparse adjacency matrices are not supported yet
        # (would be: edges = list(zip(*graph.nonzero())))
        raise NotImplementedError
    else:
        edges = list(graph.edges)
if labels is not None:
# filter out noise nodes labelled as -1
# idx, = np.where(labels[:,0] > -1)
num_labels = int(max(set(labels[:,0])) + 1)
# colours = np.random.rand(num_labels, 3)
colours = np.array([
[1,0,0],
[0,1,0],
[0,0,1],
[1,1,0],
[1,0,1],
[0,1,1],
[0,0,0],
[1,1,1]
])
# colours = np.array(["r", "g", "b", "y", "m", "c"])
assert num_labels < len(colours)
else:
idx = np.arange(len(embedding))
colours = None
if not isinstance(edges, np.ndarray):
edges = np.array(edges)
print ("saving two-dimensional poincare plot to {}".format(path))
fig = plt.figure()
title = "Two dimensional poincare plot"
plt.suptitle(title)
ax = fig.add_subplot(111)
hyperbolic_setup(fig, ax)
# a = embedding[edges[:,0]]
# b = embedding[edges[:,1]]
# c = get_third_point(a, b)
# draw_geodesic(a, b, c, ax)
# # s = {n: (bc+.05) * 100 for n, bc in nx.betweenness_centrality(graph).items()}
# # s = [s[n] for n in sorted(graph.nodes)]
# s = np.array([graph.degree(n, weight="weight") for n in sorted(graph.nodes())])
# s = s / s.max() * 100
# ax.scatter(embedding[idx,0], embedding[idx,1],
# c=colours[labels[idx,0]] if labels is not None else None,
# s=s, zorder=2)
pos = {n: emb for n, emb in zip(sorted(graph.nodes()), embedding)}
node_colours = np.array([colours[labels[n, 0]] for n in graph.nodes()]) if colours is not None else None
# bc = nx.betweenness_centrality(graph)
# node_sizes = np.array([(bc[n] + .05) * 50 for n in sorted(graph.nodes())])
node_sizes = np.array([graph.degree(n, weight="weight") for n in graph.nodes()])
node_sizes = node_sizes / node_sizes.max() * 250
nx.draw_networkx_nodes(graph, pos=pos, node_color=node_colours, node_size=node_sizes)
nx.draw_networkx_edges(graph, pos=pos, width=.05, node_size=node_sizes)
# nx.draw_networkx_edge_labels(graph, pos=pos, edge_labels=nx.get_edge_attributes(graph, name="weight"))
if path is not None:
plt.savefig(path)
plt.close()
else:
plt.show()
def plot_degree_dist(graph, name, filename):
plt.style.use("ggplot")
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
degrees = sorted(dict(graph.degree(weight="weight")).values())
deg, counts = zip(*collections.Counter(degrees).items())
deg = np.array(deg)
counts = np.array(counts)
idx = deg > 0
deg = deg[idx]
counts = counts[idx]
counts = counts.astype(float) / counts.sum()
m, c = np.polyfit(np.log(deg), np.log(counts), 1)
y_fit = np.exp(m*np.log(deg) + c)
plt.scatter(deg, counts, marker="x")
plt.plot(deg, y_fit, ':', c="r")
# plt.set(
# # title="{} Degree Distribution".format(name),
# xscale="log", yscale="log",
# # xlabel="Degree", ylabel="Probability",
# )
plt.xlabel('Degree', fontsize=30)
plt.ylabel('Probability', fontsize=30, )
plt.xscale("log")
plt.yscale("log")
ax.xaxis.label.set_color('black')
ax.yaxis.label.set_color('black')
# ax.xaxis.ticks.set_color("black")
ax.tick_params(axis='x', colors='black')
ax.tick_params(axis='y', colors='black')
# ax.set_ylim(bottom=.9)
# plt.show()
print ("saving degree distribution plot to", filename)
plt.savefig(filename, dpi=400)
|
the-stack_0_18628 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsScheduler
"""
import copy
import time
from oslo.config import cfg
from nova import block_device
from nova.cells import filters
from nova.cells import weights
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import uuidutils
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance
from nova import utils
CONF = cfg.CONF
CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
CONF.import_opt('scheduler_filter_classes', 'nova.cells.scheduler',
group='cells')
CONF.import_opt('scheduler_weight_classes', 'nova.cells.scheduler',
group='cells')
class FakeFilterClass1(filters.BaseCellFilter):
pass
class FakeFilterClass2(filters.BaseCellFilter):
pass
class FakeWeightClass1(weights.BaseCellWeigher):
pass
class FakeWeightClass2(weights.BaseCellWeigher):
pass
class CellsSchedulerTestCase(test.TestCase):
"""Test case for CellsScheduler class."""
def setUp(self):
super(CellsSchedulerTestCase, self).setUp()
self.flags(scheduler_filter_classes=[], scheduler_weight_classes=[],
group='cells')
self._init_cells_scheduler()
def _init_cells_scheduler(self):
fakes.init(self)
self.msg_runner = fakes.get_message_runner('api-cell')
self.scheduler = self.msg_runner.scheduler
self.state_manager = self.msg_runner.state_manager
self.my_cell_state = self.state_manager.get_my_state()
self.ctxt = context.RequestContext('fake', 'fake')
instance_uuids = []
for x in xrange(3):
instance_uuids.append(uuidutils.generate_uuid())
self.instance_uuids = instance_uuids
self.instances = [{'uuid': uuid} for uuid in instance_uuids]
self.request_spec = {
'instance_uuids': instance_uuids,
'instance_properties': self.instances[0],
'instance_type': 'fake_type',
'image': 'fake_image'}
self.build_inst_kwargs = {
'instances': self.instances,
'image': 'fake_image',
'filter_properties': {'instance_type': 'fake_type'},
'security_groups': 'fake_sec_groups',
'block_device_mapping': 'fake_bdm'}
def test_create_instances_here(self):
# Just grab the first instance type
inst_type = db.flavor_get(self.ctxt, 1)
image = {'properties': {}}
instance_uuids = self.instance_uuids
instance_props = {'id': 'removed',
'security_groups': 'removed',
'info_cache': 'removed',
'name': 'instance-00000001',
'hostname': 'meow',
'display_name': 'moo',
'image_ref': 'fake_image_ref',
'user_id': self.ctxt.user_id,
# Test these as lists
'metadata': [{'key': 'moo', 'value': 'cow'}],
'system_metadata': [{'key': 'meow', 'value': 'cat'}],
'project_id': self.ctxt.project_id}
call_info = {'uuids': []}
block_device_mapping = [block_device.create_image_bdm(
'fake_image_ref')]
def _fake_instance_update_at_top(_ctxt, instance):
call_info['uuids'].append(instance['uuid'])
self.stubs.Set(self.msg_runner, 'instance_update_at_top',
_fake_instance_update_at_top)
self.scheduler._create_instances_here(self.ctxt, instance_uuids,
instance_props, inst_type, image,
['default'], block_device_mapping)
self.assertEqual(instance_uuids, call_info['uuids'])
for instance_uuid in instance_uuids:
instance = db.instance_get_by_uuid(self.ctxt, instance_uuid)
meta = utils.instance_meta(instance)
self.assertEqual('cow', meta['moo'])
sys_meta = utils.instance_sys_meta(instance)
self.assertEqual('cat', sys_meta['meow'])
self.assertEqual('meow', instance['hostname'])
self.assertEqual('moo-%s' % instance['uuid'],
instance['display_name'])
self.assertEqual('fake_image_ref', instance['image_ref'])
def test_build_instances_selects_child_cell(self):
# Make sure there's no capacity info so we're sure to
# select a child cell
our_cell_info = self.state_manager.get_my_state()
our_cell_info.capacities = {}
call_info = {'times': 0}
orig_fn = self.msg_runner.build_instances
def msg_runner_build_instances(ctxt, target_cell, build_inst_kwargs):
            # This gets called twice: once when we run the build in this
            # cell, and again when the child cell is picked. The first
            # time, just run it like normal.
if not call_info['times']:
call_info['times'] += 1
return orig_fn(ctxt, target_cell, build_inst_kwargs)
call_info['ctxt'] = ctxt
call_info['target_cell'] = target_cell
call_info['build_inst_kwargs'] = build_inst_kwargs
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
return request_spec
self.stubs.Set(self.msg_runner, 'build_instances',
msg_runner_build_instances)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
self.build_inst_kwargs)
self.assertEqual(self.ctxt, call_info['ctxt'])
self.assertEqual(self.build_inst_kwargs,
call_info['build_inst_kwargs'])
child_cells = self.state_manager.get_child_cells()
self.assertIn(call_info['target_cell'], child_cells)
def test_build_instances_selects_current_cell(self):
# Make sure there's no child cells so that we will be
# selected
self.state_manager.child_cells = {}
call_info = {}
build_inst_kwargs = copy.deepcopy(self.build_inst_kwargs)
def fake_create_instances_here(ctxt, instance_uuids,
instance_properties, instance_type, image, security_groups,
block_device_mapping):
call_info['ctxt'] = ctxt
call_info['instance_uuids'] = instance_uuids
call_info['instance_properties'] = instance_properties
call_info['instance_type'] = instance_type
call_info['image'] = image
call_info['security_groups'] = security_groups
call_info['block_device_mapping'] = block_device_mapping
instances = [fake_instance.fake_instance_obj(ctxt, **instance)
for instance in self.instances]
return instances
def fake_rpc_build_instances(ctxt, **build_inst_kwargs):
call_info['build_inst_kwargs'] = build_inst_kwargs
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
return request_spec
self.stubs.Set(self.scheduler, '_create_instances_here',
fake_create_instances_here)
self.stubs.Set(self.scheduler.compute_task_api,
'build_instances', fake_rpc_build_instances)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
build_inst_kwargs)
self.assertEqual(self.ctxt, call_info['ctxt'])
self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
self.assertEqual(self.build_inst_kwargs['instances'][0],
call_info['instance_properties'])
self.assertEqual(
self.build_inst_kwargs['filter_properties']['instance_type'],
call_info['instance_type'])
self.assertEqual(self.build_inst_kwargs['image'], call_info['image'])
self.assertEqual(self.build_inst_kwargs['security_groups'],
call_info['security_groups'])
self.assertEqual(self.build_inst_kwargs['block_device_mapping'],
call_info['block_device_mapping'])
self.assertEqual(build_inst_kwargs,
call_info['build_inst_kwargs'])
self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
def test_build_instances_retries_when_no_cells_avail(self):
self.flags(scheduler_retries=7, group='cells')
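        # 7 retries plus the initial attempt means 8 scheduling tries, and
        # every instance should end up in the ERROR state afterwards.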
call_info = {'num_tries': 0, 'errored_uuids': []}
def fake_grab_target_cells(filter_properties):
call_info['num_tries'] += 1
raise exception.NoCellsAvailable()
def fake_sleep(_secs):
return
def fake_instance_update(ctxt, instance_uuid, values):
self.assertEqual(vm_states.ERROR, values['vm_state'])
call_info['errored_uuids'].append(instance_uuid)
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
return request_spec
self.stubs.Set(self.scheduler, '_grab_target_cells',
fake_grab_target_cells)
self.stubs.Set(time, 'sleep', fake_sleep)
self.stubs.Set(db, 'instance_update', fake_instance_update)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
self.build_inst_kwargs)
self.assertEqual(8, call_info['num_tries'])
self.assertEqual(self.instance_uuids, call_info['errored_uuids'])
def test_schedule_method_on_random_exception(self):
self.flags(scheduler_retries=7, group='cells')
instances = [{'uuid': uuid} for uuid in self.instance_uuids]
method_kwargs = {
'image': 'fake_image',
'instances': instances,
'filter_properties': {}}
call_info = {'num_tries': 0,
'errored_uuids1': [],
'errored_uuids2': []}
def fake_grab_target_cells(filter_properties):
call_info['num_tries'] += 1
raise test.TestingException()
def fake_instance_update(ctxt, instance_uuid, values):
self.assertEqual(vm_states.ERROR, values['vm_state'])
call_info['errored_uuids1'].append(instance_uuid)
def fake_instance_update_at_top(ctxt, instance):
self.assertEqual(vm_states.ERROR, instance['vm_state'])
call_info['errored_uuids2'].append(instance['uuid'])
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'image': image}
return request_spec
self.stubs.Set(self.scheduler, '_grab_target_cells',
fake_grab_target_cells)
self.stubs.Set(db, 'instance_update', fake_instance_update)
self.stubs.Set(self.msg_runner, 'instance_update_at_top',
fake_instance_update_at_top)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
self.msg_runner.build_instances(self.ctxt, self.my_cell_state,
method_kwargs)
# Shouldn't retry
self.assertEqual(1, call_info['num_tries'])
self.assertEqual(self.instance_uuids, call_info['errored_uuids1'])
self.assertEqual(self.instance_uuids, call_info['errored_uuids2'])
def test_filter_schedule_skipping(self):
# if a filter handles scheduling, short circuit
def _grab(filter_properties):
return None
self.stubs.Set(self.scheduler, '_grab_target_cells', _grab)
def _test(self, *args):
raise test.TestingException("shouldn't be called")
try:
self.scheduler._schedule_build_to_cells(None, None, None, _test,
None)
except test.TestingException:
self.fail("Scheduling did not properly short circuit")
def test_cells_filter_args_correct(self):
# Re-init our fakes with some filters.
our_path = 'nova.tests.cells.test_cells_scheduler'
cls_names = [our_path + '.' + 'FakeFilterClass1',
our_path + '.' + 'FakeFilterClass2']
self.flags(scheduler_filter_classes=cls_names, group='cells')
self._init_cells_scheduler()
# Make sure there's no child cells so that we will be
# selected. Makes stubbing easier.
self.state_manager.child_cells = {}
call_info = {}
def fake_create_instances_here(ctxt, instance_uuids,
instance_properties, instance_type, image, security_groups,
block_device_mapping):
call_info['ctxt'] = ctxt
call_info['instance_uuids'] = instance_uuids
call_info['instance_properties'] = instance_properties
call_info['instance_type'] = instance_type
call_info['image'] = image
call_info['security_groups'] = security_groups
call_info['block_device_mapping'] = block_device_mapping
def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
call_info['host_sched_kwargs'] = host_sched_kwargs
def fake_get_filtered_objs(filter_classes, cells, filt_properties):
call_info['filt_classes'] = filter_classes
call_info['filt_cells'] = cells
call_info['filt_props'] = filt_properties
return cells
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'instance_properties': instances[0],
'image': image,
'instance_type': 'fake_type'}
return request_spec
self.stubs.Set(self.scheduler, '_create_instances_here',
fake_create_instances_here)
self.stubs.Set(self.scheduler.compute_task_api,
'build_instances', fake_rpc_build_instances)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
filter_handler = self.scheduler.filter_handler
self.stubs.Set(filter_handler, 'get_filtered_objects',
fake_get_filtered_objs)
host_sched_kwargs = {'image': 'fake_image',
'instances': self.instances,
'filter_properties':
{'instance_type': 'fake_type'},
'security_groups': 'fake_sec_groups',
'block_device_mapping': 'fake_bdm'}
self.msg_runner.build_instances(self.ctxt,
self.my_cell_state, host_sched_kwargs)
# Our cell was selected.
self.assertEqual(self.ctxt, call_info['ctxt'])
self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
self.assertEqual(self.request_spec['instance_properties'],
call_info['instance_properties'])
self.assertEqual(self.request_spec['instance_type'],
call_info['instance_type'])
self.assertEqual(self.request_spec['image'], call_info['image'])
self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
# Filter args are correct
expected_filt_props = {'context': self.ctxt,
'scheduler': self.scheduler,
'routing_path': self.my_cell_state.name,
'host_sched_kwargs': host_sched_kwargs,
'request_spec': self.request_spec,
'instance_type': 'fake_type'}
self.assertEqual(expected_filt_props, call_info['filt_props'])
self.assertEqual([FakeFilterClass1, FakeFilterClass2],
call_info['filt_classes'])
self.assertEqual([self.my_cell_state], call_info['filt_cells'])
def test_cells_filter_returning_none(self):
# Re-init our fakes with some filters.
our_path = 'nova.tests.cells.test_cells_scheduler'
cls_names = [our_path + '.' + 'FakeFilterClass1',
our_path + '.' + 'FakeFilterClass2']
self.flags(scheduler_filter_classes=cls_names, group='cells')
self._init_cells_scheduler()
# Make sure there's no child cells so that we will be
# selected. Makes stubbing easier.
self.state_manager.child_cells = {}
call_info = {'scheduled': False}
def fake_create_instances_here(ctxt, request_spec):
# Should not be called
call_info['scheduled'] = True
def fake_get_filtered_objs(filter_classes, cells, filt_properties):
# Should cause scheduling to be skipped. Means that the
# filter did it.
return None
self.stubs.Set(self.scheduler, '_create_instances_here',
fake_create_instances_here)
filter_handler = self.scheduler.filter_handler
self.stubs.Set(filter_handler, 'get_filtered_objects',
fake_get_filtered_objs)
self.msg_runner.build_instances(self.ctxt,
self.my_cell_state, {})
self.assertFalse(call_info['scheduled'])
def test_cells_weight_args_correct(self):
# Re-init our fakes with some filters.
our_path = 'nova.tests.cells.test_cells_scheduler'
cls_names = [our_path + '.' + 'FakeWeightClass1',
our_path + '.' + 'FakeWeightClass2']
self.flags(scheduler_weight_classes=cls_names, group='cells')
self._init_cells_scheduler()
# Make sure there's no child cells so that we will be
# selected. Makes stubbing easier.
self.state_manager.child_cells = {}
call_info = {}
def fake_create_instances_here(ctxt, instance_uuids,
instance_properties, instance_type, image, security_groups,
block_device_mapping):
call_info['ctxt'] = ctxt
call_info['instance_uuids'] = instance_uuids
call_info['instance_properties'] = instance_properties
call_info['instance_type'] = instance_type
call_info['image'] = image
call_info['security_groups'] = security_groups
call_info['block_device_mapping'] = block_device_mapping
def fake_rpc_build_instances(ctxt, **host_sched_kwargs):
call_info['host_sched_kwargs'] = host_sched_kwargs
def fake_get_weighed_objs(weight_classes, cells, filt_properties):
call_info['weight_classes'] = weight_classes
call_info['weight_cells'] = cells
call_info['weight_props'] = filt_properties
return [weights.WeightedCell(cells[0], 0.0)]
def fake_build_request_spec(ctxt, image, instances):
request_spec = {
'instance_uuids': [inst['uuid'] for inst in instances],
'instance_properties': instances[0],
'image': image,
'instance_type': 'fake_type'}
return request_spec
self.stubs.Set(self.scheduler, '_create_instances_here',
fake_create_instances_here)
self.stubs.Set(scheduler_utils, 'build_request_spec',
fake_build_request_spec)
self.stubs.Set(self.scheduler.compute_task_api,
'build_instances', fake_rpc_build_instances)
weight_handler = self.scheduler.weight_handler
self.stubs.Set(weight_handler, 'get_weighed_objects',
fake_get_weighed_objs)
host_sched_kwargs = {'image': 'fake_image',
'instances': self.instances,
'filter_properties':
{'instance_type': 'fake_type'},
'security_groups': 'fake_sec_groups',
'block_device_mapping': 'fake_bdm'}
self.msg_runner.build_instances(self.ctxt,
self.my_cell_state, host_sched_kwargs)
# Our cell was selected.
self.assertEqual(self.ctxt, call_info['ctxt'])
self.assertEqual(self.instance_uuids, call_info['instance_uuids'])
self.assertEqual(self.request_spec['instance_properties'],
call_info['instance_properties'])
self.assertEqual(self.request_spec['instance_type'],
call_info['instance_type'])
self.assertEqual(self.request_spec['image'], call_info['image'])
self.assertEqual(host_sched_kwargs, call_info['host_sched_kwargs'])
# Weight args are correct
expected_filt_props = {'context': self.ctxt,
'scheduler': self.scheduler,
'routing_path': self.my_cell_state.name,
'host_sched_kwargs': host_sched_kwargs,
'request_spec': self.request_spec,
'instance_type': 'fake_type'}
self.assertEqual(expected_filt_props, call_info['weight_props'])
self.assertEqual([FakeWeightClass1, FakeWeightClass2],
call_info['weight_classes'])
self.assertEqual([self.my_cell_state], call_info['weight_cells'])
|
the-stack_0_18630 | # encoding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import pkg_resources
# -- Project information -----------------------------------------------------
project = 'CycloneDX Python'
copyright = '2022, Copyright (c) OWASP Foundation'
author = 'Paul Horton, Jan Kowalleck, Steve Springett, Patrick Dwyer'
# The full version, including alpha/beta/rc tags
release = pkg_resources.get_distribution("cyclonedx-bom").version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"autoapi.extension",
"sphinx_rtd_theme",
"m2r2"
]
# Document Python Code
autoapi_type = 'python'
autoapi_dirs = ['../cyclonedx_py']
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
the-stack_0_18632 | import torch
import torch.nn as nn
from torch.distributions.categorical import Categorical
from torch.optim import Adam
import gym
import numpy as np
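# REINFORCE-style "simplest policy gradient" trainer (in the spirit of the
# OpenAI Spinning Up introductory example): a categorical policy over
# discrete actions, where every log-probability in an episode is weighted by
# that episode's total return R(tau).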
def mlp(sizes, activation=nn.ReLU6, output_activation=nn.Identity):
layers = []
for j in range(len(sizes) - 1):
act = activation if j < len(sizes) - 2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]
return nn.Sequential(*layers)
def list_to_tensor(list_arr, dtype=torch.float32):
return torch.tensor(np.array(list_arr), dtype=dtype)
def train(env_name, hidden_sizes=[32], lr=1e-2, epochs=50,
batch_size=5000, render=False):
assert env_name
env = gym.make(env_name)
obs_dim = env.observation_space.shape[0]
n_acts = env.action_space.n
logits_net = mlp(sizes=[obs_dim] + hidden_sizes + [n_acts])
# action distribution
def get_policy(obs):
logits = logits_net(obs)
return Categorical(logits=logits)
def get_action(obs):
return get_policy(obs).sample().item()
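    # Note: this is a "pseudo-loss" whose gradient is the policy-gradient
    # estimate, -mean(log pi(a|s) * R(tau)); its value is not a meaningful
    # measure of performance by itself.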
def compute_loss(obs, act, weights):
logp = get_policy(obs).log_prob(act)
return -(logp * weights).mean()
optimizer = Adam(logits_net.parameters(), lr=lr)
def train_one_epoch():
batch_obs = []
batch_acts = []
batch_weights = []
batch_rets = []
batch_lens = []
# reset episode-specific variables
obs = env.reset()
done = False
ep_rews = [] # list for rewards accrued throughout ep.
finished_rending_this_epoch = False
while True:
if not finished_rending_this_epoch and render:
env.render()
# save obs
batch_obs.append(obs.copy())
# act
act = get_action(torch.as_tensor(obs, dtype=torch.float32))
obs, rew, done, _ = env.step(act)
# save action, reward
batch_acts.append(act)
ep_rews.append(rew)
if done:
ep_ret, ep_len = sum(ep_rews), len(ep_rews)
batch_rets.append(ep_ret)
batch_lens.append(ep_len)
# the weight for each logprob(a|s) is R(tau)
batch_weights += [ep_ret] * ep_len
# reset episode-specific variables
obs, done, ep_rews = env.reset(), False, []
finished_rending_this_epoch = True
if len(batch_obs) > batch_size:
break
optimizer.zero_grad()
batch_loss = compute_loss(obs=list_to_tensor(batch_obs),
act=list_to_tensor(batch_acts, dtype=torch.int32),
weights=list_to_tensor(batch_weights))
batch_loss.backward()
optimizer.step()
return batch_loss, batch_rets, batch_lens
# training loop
for i in range(epochs):
batch_loss, batch_rets, batch_lens = train_one_epoch()
print(f"epoch: {i:3d}\t loss:{batch_loss:.3f}\t"
f"return: {np.mean(batch_rets):.3f}\t ep_len: {np.mean(batch_lens):.3f}\t")
if __name__ == "__main__":
# Test
# m = Categorical(torch.tensor([1., 1, 1, 1, 1]))
# for i in range(10):
# r = m.sample()
# print(r)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--lr', type=float, default=1e-2)
args = parser.parse_args()
print("\nSimplest PG")
train(env_name=args.env_name, render=args.render, lr=args.lr)
|
the-stack_0_18633 | import numpy as np
import pytest
from group_lasso import _subsampling
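# Fixture providing a generator factory: each test calls row_lengths() to get
# a fresh generator over the lengths 2**i - 1 for i in 2..19.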
@pytest.fixture
def row_lengths():
def _row_lengths():
for i in range(2, 20):
yield 2 ** i - 1
return _row_lengths
def test_random_row_idxes_correct_size_fraction(row_lengths):
for row_length in row_lengths():
for fraction in [0.5, 0.1, 1 / np.sqrt(2)]:
row_idxes = _subsampling._get_random_row_idxes(
row_length, fraction, np.random
)
assert len(row_idxes) == int(row_length * fraction)
def test_random_row_idxes_correct_size_integer(row_lengths):
for row_length in row_lengths():
for num in [5, 10, 1000]:
if num > row_length:
continue
row_idxes = _subsampling._get_random_row_idxes(
row_length, num, np.random
)
assert len(row_idxes) == num
def test_random_row_idxes_fails_at_negative_input(row_lengths):
for row_length in row_lengths():
with pytest.raises(ValueError):
row_idxes = _subsampling._get_random_row_idxes(
row_length, -0.1, np.random
)
with pytest.raises(ValueError):
row_idxes = _subsampling._get_random_row_idxes(
row_length, -1, np.random
)
with pytest.raises(ValueError):
row_idxes = _subsampling._get_random_row_idxes(
row_length, 0, np.random
)
with pytest.raises(ValueError):
row_idxes = _subsampling._get_random_row_idxes(
row_length, row_length + 1, np.random
)
with pytest.raises(ValueError):
row_idxes = _subsampling._get_random_row_idxes(
row_length, "invalid", np.random
)
def test_random_row_idxes_sqrt(row_lengths):
for row_length in row_lengths():
row_idxes = _subsampling._get_random_row_idxes(
row_length, "sqrt", np.random
)
assert len(row_idxes) == int(np.sqrt(row_length))
def test_random_row_idxes_unique(row_lengths):
for row_length in row_lengths():
row_idxes = _subsampling._get_random_row_idxes(
row_length, "sqrt", np.random
)
assert len(row_idxes) == len(set(row_idxes))
class TestSubsampler:
def test_subsampling_scheme_1_is_identity(self, row_lengths):
for row_length in row_lengths():
subsampler = _subsampling.Subsampler(
row_length, 1, random_state=np.random.RandomState(None)
)
X = np.random.standard_normal((row_length, 5))
assert np.all(subsampler.subsample(X) == X)
y = np.random.standard_normal(row_length)
Xs, ys = subsampler.subsample(X, y)
assert np.all(X == Xs) and np.all(y == ys)
|
the-stack_0_18635 | #!/usr/bin/env python3
from socketserver import TCPServer
from http.server import BaseHTTPRequestHandler
from http import HTTPStatus
import http.client
import json
import os
from collections import OrderedDict
# Credits:
# https://gist.github.com/bradmontgomery/2219997
# https://stackoverflow.com/questions/3474045/problems-with-my-basehttpserver
# https://docs.python.org/3.5/library/http.server.html
class RequestHandler(BaseHTTPRequestHandler):
def _headers(self):
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "application/javascript; charset=UTF-8")
self.end_headers()
def _content(self, content=None):
if content is None:
with open("status.txt") as f:
status = f.read().strip() == "opened"
changetime = int(os.path.getmtime("status.txt"))
data = OrderedDict({
"api": "0.13",
"space": "nordlab e. V.",
"logo": "http://nordlab-ev.de/img/null.png",
"url": "http://nordlab-ev.de",
"location": {
"address": "nordlab e. V., Offener Kanal Flensburg, St.-Jürgen-Straße 95, 24937 Flensburg",
"lat": 54.791614,
"lon": 9.442367
},
"contact": {
"ml": "[email protected]",
"irc": "irc://irc.lugfl.de:6668/#hackerspace",
"twitter": "@nordlab",
"phone": "+49 461 574945880"
},
"issue_report_channels": [ "twitter" ],
"state": {
"open": status,
"lastchange": changetime
},
"projects": [ "http://freifunk-flensburg.de" ],
"open": status,
"icon": {
"open": "",
"closed": ""
}
})
content = json.dumps(data).encode("utf-8")
return content.strip()
def do_HEAD(self):
self._headers()
def do_GET(self):
self._headers()
self.wfile.write(self._content())
def do_POST(self):
length = int(self.headers.get("Content-Length"))
data = self.rfile.read(length)
self._headers()
self.wfile.write(self._content(data.decode("utf-8")).encode("utf-8"))
if __name__ == "__main__":
HOST, PORT = "localhost", 8001
try:
httpd = TCPServer((HOST, PORT), RequestHandler)
print("Serving HTTP at port", PORT)
httpd.serve_forever()
except KeyboardInterrupt:
print("Shutting down HTTP server")
httpd.shutdown()
httpd.server_close()
print()
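# Quick manual check while the server is running (uses the defaults above):
#   curl http://localhost:8001/   # should return the SpaceAPI JSON document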
|
the-stack_0_18638 | import numpy as np
import cv2
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
from face_detector import get_face_detector, find_faces
def calc_hist(img):
"""
To calculate histogram of an RGB image
Parameters
----------
img : Array of uint8
Image whose histogram is to be calculated
Returns
-------
histogram : np.array
The required histogram
"""
histogram = [0] * 3
for j in range(3):
histr = cv2.calcHist([img], [j], None, [256], [0, 256])
histr *= 255.0 / histr.max()
histogram[j] = histr
return np.array(histogram)
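# Example: calc_hist(cv2.cvtColor(bgr_image, cv2.COLOR_BGR2YCR_CB)) returns an array of
# shape (3, 256, 1): one 256-bin histogram per channel, rescaled so its peak bin is 255.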
face_model = get_face_detector()
clf = joblib.load('models/face_spoofing.pkl')
cap = cv2.VideoCapture(0)
sample_number = 1
count = 0
measures = np.zeros(sample_number, dtype=float)  # np.float was removed in NumPy >= 1.24
while True:
ret, img = cap.read()
faces = find_faces(img, face_model)
measures[count%sample_number]=0
height, width = img.shape[:2]
for x, y, x1, y1 in faces:
roi = img[y:y1, x:x1]
point = (0,0)
img_ycrcb = cv2.cvtColor(roi, cv2.COLOR_BGR2YCR_CB)
img_luv = cv2.cvtColor(roi, cv2.COLOR_BGR2LUV)
ycrcb_hist = calc_hist(img_ycrcb)
luv_hist = calc_hist(img_luv)
feature_vector = np.append(ycrcb_hist.ravel(), luv_hist.ravel())
feature_vector = feature_vector.reshape(1, len(feature_vector))
prediction = clf.predict_proba(feature_vector)
prob = prediction[0][1]
measures[count % sample_number] = prob
cv2.rectangle(img, (x, y), (x1, y1), (255, 0, 0), 2)
point = (x, y-5)
# print (measures, np.mean(measures))
if 0 not in measures:
text = "True"
if np.mean(measures) >= 0.7:
text = "False"
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img=img, text=text, org=point, fontFace=font, fontScale=0.9, color=(0, 0, 255),
thickness=2, lineType=cv2.LINE_AA)
else:
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img=img, text=text, org=point, fontFace=font, fontScale=0.9,
color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
count+=1
cv2.imshow('img_rgb', img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
the-stack_0_18639 | # -*- coding: utf-8 -*-
import numpy as np
from annoy import AnnoyIndex
from gokinjo.backend_base import BackendTransformerBase
class AnnoyTransformer(BackendTransformerBase):
def __init__(self, n_trees=10, search_k=-1, n_neighbors=1):
super().__init__(n_neighbors)
# backend specified parameters
self.n_trees_ = n_trees
self.search_k_ = search_k
def build_model(self, X, k, c):
model = AnnoyIndex(X.shape[1], metric='euclidean')
for j, x in enumerate(X):
model.add_item(j, x)
model.build(n_trees=self.n_trees_)
return model
def measure_distances(self, X, model, k, c):
distances = np.array([self._neighbor_distance(model, x, k) for x in X])
return distances
def _neighbor_distance(self, model, x, k):
_, distances = model.get_nns_by_vector(x, k,
search_k=self.search_k_,
include_distances=True)
return distances
def get_params(self, deep=True):
params = super().get_params(deep)
annoy_params = {
'n_trees': self.n_trees_,
'search_k': self.search_k_,
}
params.update(annoy_params)
return params
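# Illustrative sketch only -- it assumes the scikit-learn style fit()/transform()
# interface inherited from BackendTransformerBase (method names may differ in gokinjo):
#   transformer = AnnoyTransformer(n_trees=10, search_k=-1, n_neighbors=1)
#   knn_features = transformer.fit(X_train, y_train).transform(X_test)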
|
the-stack_0_18641 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import logging
import sys
from contextlib import contextmanager
from petl.compat import PY3
from petl.io.sources import register_reader, register_writer, get_reader, get_writer
logger = logging.getLogger(__name__)
# region RemoteSource
class RemoteSource(object):
"""Read or write directly from files in remote filesystems.
This source handles many filesystems that are selected based on the
protocol passed in the `url` argument.
    The url should be specified in `to...()` and `from...()` functions. E.g.::
>>> import petl as etl
>>>
>>> def example_s3():
... url = 's3://mybucket/prefix/to/myfilename.csv'
... data = b'foo,bar\\na,1\\nb,2\\nc,2\\n'
...
... etl.tocsv(data, url)
... tbl = etl.fromcsv(url)
...
>>> example_s3() # doctest: +SKIP
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
This source uses `fsspec`_ to provide the data transfer with the remote
filesystem. Check the `Built-in Implementations <fs_builtin>`_ for available
remote implementations.
Some filesystem can use `URL chaining <fs_chain>`_ for compound I/O.
.. note::
        This source requires `fsspec`_ to be installed, e.g.::
$ pip install fsspec
        Some remote filesystems require additional packages to be installed.
Check `Known Implementations <fs_known>`_ for checking what packages
need to be installed, e.g.::
$ pip install s3fs # AWS S3
$ pip install gcsfs # Google Cloud Storage
$ pip install adlfs # Azure Blob service
$ pip install paramiko # SFTP
$ pip install requests # HTTP, github
.. versionadded:: 1.6.0
.. _fsspec: https://filesystem-spec.readthedocs.io/en/latest/
.. _fs_builtin: https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
.. _fs_known: https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
.. _fs_chain: https://filesystem-spec.readthedocs.io/en/latest/features.html#url-chaining
"""
def __init__(self, url, **kwargs):
self.url = url
self.kwargs = kwargs
def open_file(self, mode="rb"):
import fsspec
# auto_mkdir=True can fail in some filesystems or without permission for full path
# E.g: s3fs tries to create a bucket when writing into a folder does not exists
fs = fsspec.open(self.url, mode=mode, compression='infer', auto_mkdir=False, **self.kwargs)
return fs
@contextmanager
def open(self, mode="rb"):
mode2 = mode[:1] + r"b" # python2
fs = self.open_file(mode=mode2)
with fs as source:
yield source
# registering filesystems with packages installed
def _register_filesystems(only_available=False):
"""Search for python packages supporting remote filesystems."""
from fsspec.registry import known_implementations
impls = known_implementations.items()
r = w = 0
for protocol, spec in impls:
missing_deps = "err" in spec
if missing_deps:
emsg = "# WARN: fsspec {} unavailable: {}".format(protocol, spec["err"])
logger.debug(emsg)
if only_available:
                # otherwise forward the exception on first use when the
# handler can show what package is missing
continue
# when missing a package for fsspec use the available source in petl
# E.g: fsspec requires `requests` package installed for handling http and https
reader = get_reader(protocol)
if not missing_deps or reader is None:
register_reader(protocol, RemoteSource)
r += 1
writer = get_writer(protocol)
if not missing_deps or writer is None:
register_writer(protocol, RemoteSource)
w += 1
dlog = "# fsspec: registered %s remote readers and %s remote writers"
logger.debug(dlog, r, w)
def _try_register_filesystems():
try:
import fsspec
except ImportError:
logger.debug("# Missing fsspec package. Install with: pip install fsspec")
else:
try:
_register_filesystems()
except Exception as ex:
raise Exception("# ERROR: failed to register fsspec filesystems", ex)
if PY3:
_try_register_filesystems()
# endregion
# region SMBSource
class SMBSource(object):
"""Downloads or uploads to Windows and Samba network drives. E.g.::
>>> def example_smb():
... import petl as etl
... url = 'smb://user:password@server/share/folder/file.csv'
... data = b'foo,bar\\na,1\\nb,2\\nc,2\\n'
... etl.tocsv(data, url)
... tbl = etl.fromcsv(url)
...
>>> example_smb() # doctest: +SKIP
+-----+-----+
| foo | bar |
+=====+=====+
| 'a' | '1' |
+-----+-----+
| 'b' | '2' |
+-----+-----+
| 'c' | '2' |
+-----+-----+
The argument `url` (str) must have a URI with format:
`smb://workgroup;user:password@server:port/share/folder/file.csv`.
Note that you need to pass in a valid hostname or IP address for the host
component of the URL. Do not use the Windows/NetBIOS machine name for the
host component.
The first component of the path in the URL points to the name of the shared
folder. Subsequent path components will point to the directory/folder/file.
.. note::
        This source requires `smbprotocol`_ to be installed, e.g.::
$ pip install smbprotocol[kerberos]
.. versionadded:: 1.5.0
.. _smbprotocol: https://github.com/jborean93/smbprotocol#requirements
"""
def __init__(self, url, **kwargs):
self.url = url
self.kwargs = kwargs
@contextmanager
def open(self, mode="rb"):
mode2 = mode[:1] + r"b" # python2
source = _open_file_smbprotocol(self.url, mode=mode2, **self.kwargs)
try:
yield source
finally:
source.close()
def _open_file_smbprotocol(url, mode="rb", **kwargs):
_domain, host, port, user, passwd, server_path = _parse_smb_url(url)
import smbclient
try:
# register the server with explicit credentials
if user:
smbclient.register_session(
host, username=user, password=passwd, port=port
)
# Read an existing file as bytes
mode2 = mode[:1] + r"b"
filehandle = smbclient.open_file(server_path, mode=mode2, **kwargs)
return filehandle
except Exception as ex:
raise ConnectionError("SMB error: %s" % ex).with_traceback(sys.exc_info()[2])
def _parse_smb_url(url):
e = "SMB url must be smb://workgroup;user:password@server:port/share/folder/file.txt: "
if not url:
raise ValueError("SMB error: no host given")
if not url.startswith("smb://"):
raise ValueError(e + url)
if PY3:
from urllib.parse import urlparse
else:
from urlparse import urlparse
parsed = urlparse(url)
if not parsed.path:
raise ValueError(e + url)
unc_path = parsed.path.replace("/", "\\")
server_path = "\\\\{}{}".format(parsed.hostname, unc_path)
if not parsed.username:
domain = None
username = None
elif ";" in parsed.username:
domain, username = parsed.username.split(";")
else:
domain, username = None, parsed.username
port = 445 if not parsed.port else int(parsed.port)
return domain, parsed.hostname, port, username, parsed.password, server_path
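# Example: _parse_smb_url("smb://WORKGROUP;alice:secret@fileserver/share/folder/file.csv")
# returns ("WORKGROUP", "fileserver", 445, "alice", "secret",
#          r"\\fileserver\share\folder\file.csv")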
register_reader("smb", SMBSource)
register_writer("smb", SMBSource)
# endregion
|
the-stack_0_18643 | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from IMLearn.metrics import accuracy
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a dataset in R^2 of specified size
Parameters
----------
n: int
Number of samples to generate
noise_ratio: float
Ratio of labels to invert
Returns
-------
X: np.ndarray of shape (n_samples,2)
Design matrix of samples
y: np.ndarray of shape (n_samples,)
Labels of samples
"""
'''
generate samples X with shape: (num_samples, 2) and labels y with shape (num_samples).
num_samples: the number of samples to generate
noise_ratio: invert the label for this ratio of the samples
'''
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
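# Example: X, y = generate_data(1000, 0.0) draws 1000 points uniformly from [-1, 1]^2 and
# labels points inside the circle of radius 0.5 as -1 and the rest as +1 (no labels flipped).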
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000,
test_size=500):
(train_X, train_y), (test_X, test_y) = generate_data(train_size,
noise), generate_data(
test_size,
noise)
ada_boost = AdaBoost(DecisionStump, n_learners)
# Question 1: Train- and test errors of AdaBoost in noiseless case
ada_boost.fit(train_X, train_y)
# On training
fig = go.Figure(
layout=go.Layout(title="Train- and test errors of AdaBoost in "
"noiseless case"))
train_err = []
test_err = []
num_of_learners = np.arange(1, n_learners + 1)
for t in num_of_learners:
train_err.append(ada_boost.partial_loss(train_X, train_y, t))
test_err.append(ada_boost.partial_loss(test_X, test_y, t))
fig.add_trace(
go.Scatter(x=num_of_learners, y=np.array(train_err), name='Training '
'Error'))
fig.add_trace(
go.Scatter(x=num_of_learners, y=np.array(test_err), name='Test Error'))
fig.update_layout(xaxis_title='#Learners', yaxis_title='Error')
fig.show()
# raise NotImplementedError()
# Question 2: Plotting decision surfaces
T = [5, 50, 100, 250]
fig = make_subplots(rows=2, cols=2,
subplot_titles=[f'{t} learners' for t in T],
horizontal_spacing=0.01, vertical_spacing=.06)
lims = np.array([np.r_[train_X, test_X].min(axis=0),
np.r_[train_X, test_X].max(axis=0)]).T + np.array(
[-.1, .1])
for i, t in enumerate(T):
figs = [decision_surface(lambda X: ada_boost.partial_predict(X, t),
lims[0], lims[1], showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
showlegend=False,
marker=dict(color=test_y.astype(int),
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))]
fig.add_traces(figs, rows=(i // 2) + 1, cols=(i % 2) + 1)
fig.update_layout(
title_text='Decision Boundary as a function of #learners')
fig.show()
# raise NotImplementedError()
# Question 3: Decision surface of best performing ensemble
losses = [ada_boost.partial_loss(test_X, test_y, t) for t in
range(n_learners)]
min_t = np.argmin(np.array(losses))
fig = go.Figure(
layout=go.Layout(title=f"Decision Boundary of ensemble size {min_t}\n"
f"Accuracy {1 - losses[min_t]}"))
fig.add_traces(
[decision_surface(lambda X: ada_boost.partial_predict(X, min_t),
lims[0], lims[1], showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers",
showlegend=False,
marker=dict(color=test_y.astype(int),
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))])
fig.show()
# raise NotImplementedError()
# Question 4: Decision surface with weighted samples
fig = go.Figure(
layout=go.Layout(title=f"Decision Boundary of ensemble size 250 "
f"with weighted points"))
fig.add_traces([decision_surface(ada_boost.predict,
lims[0], lims[1], showscale=False),
go.Scatter(x=train_X[:, 0], y=train_X[:, 1],
mode='markers',
showlegend=False,
marker=dict(color=train_y,
colorscale=class_colors(2),
size=(ada_boost.D_ /
np.max(ada_boost.D_)) * 15))])
fig.show()
# raise NotImplementedError()
if __name__ == '__main__':
np.random.seed(0)
fit_and_evaluate_adaboost(0)
fit_and_evaluate_adaboost(0.4)
# raise NotImplementedError()
|
the-stack_0_18645 | from easydict import EasyDict as edict
from pathlib import Path
import torch
from torch.nn import CrossEntropyLoss
from torchvision import transforms as trans
def get_config(training = True, args = None):
conf = edict()
conf.data_path = Path('data')
conf.work_path = Path('work_space/')
conf.model_path = conf.work_path/'models'
conf.log_path = conf.work_path/'log'
# conf.save_path = Path("work_space/save")
# conf.save_path = Path("simclr/model.pth")
conf.save_path = Path("MOCO/resnet50.pth")
conf.input_size = [112,112]
conf.embedding_size = 512
conf.use_mobilfacenet = True
conf.net_depth = 50
conf.drop_ratio = 0.6
conf.arch = args.arch
conf.device = torch.device('cuda:{}'.format(args.gpu_id)) if args and args.gpu_id else torch.device('cpu')
# torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
conf.test_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
conf.data_mode = 'emore'
conf.vgg_folder = conf.data_path/'faces_vgg_112x112'
conf.ms1m_folder = conf.data_path/'faces_ms1m_112x112'
conf.emore_folder = conf.data_path/'faces_emore'
conf.base_folder = conf.data_path/'facebank'/'distinct'/'raw_112'
# conf.batch_size = 100 # irse net depth 50
conf.batch_size = 20 # mobilefacenet 200
#--------------------Training Config ------------------------
if training:
conf.log_path = conf.work_path/'log'
# conf.save_path = Path("work_space/save")
conf.save_path = Path("simclr/model.pth")
# conf.weight_decay = 5e-4
conf.lr = 1e-4 # originally 1e-3
conf.milestones = [12,15,18]
conf.momentum = 0.9
conf.pin_memory = True
conf.num_workers = 2 # 4 when batchsize is 200
# conf.num_workers = 3 # when batchsize is 100
conf.ce_loss = CrossEntropyLoss()
#--------------------Inference Config ------------------------
else:
conf.facebank_path = conf.data_path/'facebank'
conf.threshold = 1.5
conf.face_limit = 1 #10
# when inference, at maximum detect 10 faces in one image, my laptop is slow
conf.min_face_size = 30
# the larger this value, the faster deduction, comes with tradeoff in small faces
return conf |
the-stack_0_18647 | from . import _ffi as ffi
from ctypes import *
from wasmtime import WasmtimeError
import typing
def setter_property(fset: typing.Callable) -> property:
prop = property(fset=fset)
if fset.__doc__:
prop.__doc__ = fset.__doc__
prop.__doc__ += "\n\n Note that this field can only be set, it cannot be read"
return prop
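# Behaviour of the resulting attribute, for illustration:
#   class Example:
#       @setter_property
#       def flag(self, value: bool) -> None:
#           """Doc for flag."""
#
#   Example().flag = True   # invokes the decorated function
#   Example().flag          # raises AttributeError, since the property has no getter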
class Config:
"""
Global configuration, used to create an `Engine`.
    A `Config` houses a number of configuration options which tweak how wasm
code is compiled or generated.
"""
def __init__(self) -> None:
self._ptr = ffi.wasm_config_new()
@setter_property
def debug_info(self, enable: bool) -> None:
"""
Configures whether DWARF debug information is emitted for the generated
code. This can improve profiling and the debugging experience.
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_debug_info_set(self._ptr, enable)
@setter_property
def wasm_threads(self, enable: bool) -> None:
"""
Configures whether the wasm [threads proposal] is enabled.
[threads proposal]: https://github.com/webassembly/threads
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_threads_set(self._ptr, enable)
@setter_property
def wasm_reference_types(self, enable: bool) -> None:
"""
Configures whether the wasm [reference types proposal] is enabled.
[reference types proposal]: https://github.com/webassembly/reference-types
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_reference_types_set(self._ptr, enable)
@setter_property
def wasm_simd(self, enable: bool) -> None:
"""
Configures whether the wasm [SIMD proposal] is enabled.
[SIMD proposal]: https://github.com/webassembly/simd
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_simd_set(self._ptr, enable)
@setter_property
def wasm_bulk_memory(self, enable: bool) -> None:
"""
Configures whether the wasm [bulk memory proposal] is enabled.
[bulk memory proposal]: https://github.com/webassembly/bulk-memory
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_bulk_memory_set(self._ptr, enable)
@setter_property
def wasm_multi_value(self, enable: bool) -> None:
"""
Configures whether the wasm [multi value proposal] is enabled.
[multi value proposal]: https://github.com/webassembly/multi-value
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_multi_value_set(self._ptr, enable)
@setter_property
def wasm_module_linking(self, enable: bool) -> None:
"""
Configures whether the wasm [module linking proposal] is enabled.
[module linking proposal]: https://github.com/webassembly/module-linking
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_module_linking_set(self._ptr, enable)
@setter_property
def wasm_multi_memory(self, enable: bool) -> None:
"""
Configures whether the wasm [multi memory proposal] is enabled.
[multi memory proposal]: https://github.com/webassembly/multi-memory
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_multi_memory_set(self._ptr, enable)
@setter_property
def wasm_memory64(self, enable: bool) -> None:
"""
Configures whether the wasm [memory64 proposal] is enabled.
[memory64 proposal]: https://github.com/webassembly/memory64
"""
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_wasm_memory64_set(self._ptr, enable)
@setter_property
def strategy(self, strategy: str) -> None:
"""
Configures the compilation strategy used for wasm code.
Acceptable values for `strategy` are:
* `"auto"`
* `"cranelift"`
"""
if strategy == "auto":
error = ffi.wasmtime_config_strategy_set(self._ptr, 0)
elif strategy == "cranelift":
error = ffi.wasmtime_config_strategy_set(self._ptr, 1)
else:
raise WasmtimeError("unknown strategy: " + str(strategy))
if error:
raise WasmtimeError._from_ptr(error)
@setter_property
def cranelift_debug_verifier(self, enable: bool) -> None:
if not isinstance(enable, bool):
raise TypeError('expected a bool')
ffi.wasmtime_config_cranelift_debug_verifier_set(self._ptr, enable)
@setter_property
def cranelift_opt_level(self, opt_level: str) -> None:
if opt_level == "none":
ffi.wasmtime_config_cranelift_opt_level_set(self._ptr, 0)
elif opt_level == "speed":
ffi.wasmtime_config_cranelift_opt_level_set(self._ptr, 1)
elif opt_level == "speed_and_size":
ffi.wasmtime_config_cranelift_opt_level_set(self._ptr, 2)
else:
raise WasmtimeError("unknown opt level: " + str(opt_level))
@setter_property
def profiler(self, profiler: str) -> None:
if profiler == "none":
error = ffi.wasmtime_config_profiler_set(self._ptr, 0)
elif profiler == "jitdump":
error = ffi.wasmtime_config_profiler_set(self._ptr, 1)
else:
raise WasmtimeError("unknown profiler: " + str(profiler))
if error:
raise WasmtimeError._from_ptr(error)
@setter_property
def cache(self, enabled: typing.Union[bool, str]) -> None:
"""
Configures whether code caching is enabled for this `Config`.
The value `True` can be passed in here to enable the default caching
configuration and location, or a path to a file can be passed in which
is a path to a TOML configuration file for the cache.
More information about cache configuration can be found at
https://bytecodealliance.github.io/wasmtime/cli-cache.html
"""
if isinstance(enabled, bool):
if not enabled:
raise WasmtimeError("caching cannot be explicitly disabled")
error = ffi.wasmtime_config_cache_config_load(self._ptr, None)
elif isinstance(enabled, str):
error = ffi.wasmtime_config_cache_config_load(self._ptr,
c_char_p(enabled.encode('utf-8')))
else:
raise TypeError("expected string or bool")
if error:
raise WasmtimeError._from_ptr(error)
@setter_property
def interruptable(self, enabled: bool) -> None:
"""
Configures whether wasm execution can be interrupted via interrupt
handles.
"""
if enabled:
val = 1
else:
val = 0
ffi.wasmtime_config_interruptable_set(self._ptr, val)
@setter_property
def consume_fuel(self, instances: bool) -> None:
"""
Configures whether wasm code will consume *fuel* as part of its
execution.
Fuel consumption allows WebAssembly to trap when fuel runs out.
Currently stores start with 0 fuel if this is enabled.
"""
if not isinstance(instances, bool):
            raise TypeError('expected a bool')
ffi.wasmtime_config_consume_fuel_set(self._ptr, instances)
def __del__(self) -> None:
if hasattr(self, '_ptr'):
ffi.wasm_config_delete(self._ptr)
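# Illustrative usage (Engine lives elsewhere in the wasmtime package; shown here only
# to sketch how a Config is typically consumed):
#   from wasmtime import Config, Engine
#   config = Config()
#   config.wasm_simd = True
#   config.cranelift_opt_level = "speed"
#   engine = Engine(config)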
|
the-stack_0_18649 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="deepface",
version="0.0.76",
author="Sefik Ilkin Serengil",
author_email="[email protected]",
description="A Lightweight Face Recognition and Facial Attribute Analysis Framework (Age, Gender, Emotion, Race) for Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/serengil/deepface",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
entry_points={
"console_scripts":
["deepface = deepface.DeepFace:cli"],
},
python_requires='>=3.5.5',
install_requires=["numpy>=1.14.0", "pandas>=0.23.4", "tqdm>=4.30.0", "gdown>=3.10.1", "Pillow>=5.2.0", "opencv-python>=4.5.5.64", "tensorflow>=1.9.0", "keras>=2.2.0", "Flask>=1.1.2", "mtcnn>=0.1.0", "retina-face>=0.0.1", "fire>=0.4.0"]
)
|
the-stack_0_18650 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Gilt Dependency Module."""
import os
import sh
from molecule import logger
from molecule import util
from molecule.dependency import base
LOG = logger.get_logger(__name__)
class Gilt(base.Base):
"""
`Gilt`_ is an alternate dependency manager.
Additional options can be passed to ``gilt overlay`` through the options
dict. Any option set in this section will override the defaults.
.. code-block:: yaml
dependency:
name: gilt
options:
debug: True
The dependency manager can be disabled by setting ``enabled`` to False.
.. code-block:: yaml
dependency:
name: gilt
enabled: False
Environment variables can be passed to the dependency.
.. code-block:: yaml
dependency:
name: gilt
env:
FOO: bar
.. _`Gilt`: https://gilt.readthedocs.io
"""
def __init__(self, config):
"""Construct Gilt."""
super(Gilt, self).__init__(config)
self._sh_command = None
self.command = "gilt"
@property
def default_options(self):
config = os.path.join(self._config.scenario.directory, "gilt.yml")
d = {"config": config}
if self._config.debug:
d["debug"] = True
return d
@property
def default_env(self):
return util.merge_dicts(os.environ, self._config.env)
def bake(self):
"""
Bake a ``gilt`` command so it's ready to execute and returns None.
:return: None
"""
self._sh_command = getattr(sh, self.command)
self._sh_command = self._sh_command.bake(
self.options, "overlay", _env=self.env, _out=LOG.out, _err=LOG.error
)
def execute(self):
if not self.enabled:
msg = "Skipping, dependency is disabled."
LOG.warning(msg)
return
if not self._has_requirements_file():
msg = "Skipping, missing the requirements file."
LOG.warning(msg)
return
if self._sh_command is None:
self.bake()
self.execute_with_retries()
def _config_file(self):
return self.options.get("config")
def _has_requirements_file(self):
return os.path.isfile(self._config_file())
|
the-stack_0_18651 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ __ __ ___
# / | \ | \ | \ / Automatic
# \__ |__/ |__/ |___| \__ Annotation
# \ | | | | \ of
# ___/ | | | | ___/ Speech
#
#
# http://www.sppas.org/
#
# ---------------------------------------------------------------------------
# Laboratoire Parole et Langage, Aix-en-Provence, France
# Copyright (C) 2011-2017 Brigitte Bigi
#
# This banner notice must not be removed
# ---------------------------------------------------------------------------
# Use of this software is governed by the GNU Public License, version 3.
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: annotationoptions.py
# ----------------------------------------------------------------------------
import wx
import wx.lib.agw.floatspin
from sppas.src.ui.wxgui.dialogs.basedialog import spBaseDialog
from sppas.src.ui.wxgui.panels.options import sppasOptionsPanel
from sppas.src.ui.wxgui.sp_icons import ANNOTATE_CONFIG_ICON
from sppas.src.ui.wxgui.sp_icons import RESTORE_ICON
# ----------------------------------------------------------------------------
class spAnnotationConfig(spBaseDialog):
"""
@author: Brigitte Bigi
@organization: Laboratoire Parole et Langage, Aix-en-Provence, France
@contact: [email protected]
@license: GPL, v3
@copyright: Copyright (C) 2011-2017 Brigitte Bigi
@summary: Dialog to configure the automatic annotation options.
Parent must be a sppasFrame.
"""
def __init__(self, parent, preferences, step, step_idx):
"""
Constructor.
        :param parent: must be the sppas main frame
"""
spBaseDialog.__init__(self, parent, preferences, title=" - Options")
wx.GetApp().SetAppName("option"+str(step_idx))
self.step = step
self.stepid = step_idx
self._preferences = preferences
self.LayoutComponents(self._create_title(),
self._create_content(),
self._create_buttons())
# ------------------------------------------------------------------------
# Create the GUI
# ------------------------------------------------------------------------
def _create_title(self):
text = self.GetParent().parameters.get_step_name(self.stepid)+" Configuration"
return self.CreateTitle(ANNOTATE_CONFIG_ICON, text)
def _create_content(self):
options_panel = sppasOptionsPanel(self, self._preferences, self.step.get_options())
options_panel.SetBackgroundColour(self._preferences.GetValue('M_BG_COLOUR'))
options_panel.SetForegroundColour(self._preferences.GetValue('M_FG_COLOUR'))
options_panel.SetFont(self._preferences.GetValue('M_FONT'))
self.items = options_panel.GetItems()
return options_panel
def _create_buttons(self):
btn_restore = self.CreateButton( RESTORE_ICON, " Restore defaults ", "Reset options to their default values" )
btn_cancel = self.CreateCancelButton()
btn_okay = self.CreateOkayButton()
self.Bind(wx.EVT_BUTTON, self._on_restore, btn_restore)
self.Bind(wx.EVT_BUTTON, self._on_cancel, btn_cancel)
self.Bind(wx.EVT_BUTTON, self._on_okay, btn_okay)
return self.CreateButtonBox([btn_restore], [btn_cancel, btn_okay])
# ------------------------------------------------------------------------
# Callbacks to events
# ------------------------------------------------------------------------
def _on_okay(self, evt):
# Save options
for i, item in enumerate(self.items):
self.step.get_option(i).set_value(item.GetValue())
self.GetParent().Update()
self.GetParent().Refresh()
del self.GetParent().opened_frames[self.GetParent().ID_FRAME_ANNOTATION_CFG+self.stepid]
self.Destroy()
def _on_restore(self, evt):
for i, item in enumerate(self.items):
item.SetValue( self.step.get_option(i).get_value())
def _on_cancel(self, evt):
del self.GetParent().opened_frames[self.GetParent().ID_FRAME_ANNOTATION_CFG+self.stepid]
self.Destroy()
# ----------------------------------------------------------------------------
|
the-stack_0_18652 |
import numpy as np
import pandas as pd
from copy import deepcopy
import networkx as nx
import scipy.sparse
from tqdm import tqdm
from scipy.cluster.hierarchy import linkage
from scipy.sparse import coo_matrix, csgraph, csr_matrix
from sklearn.decomposition import NMF
from sklearn.manifold import MDS as sklearn_MDS
from .load_hic_file import get_chromosome_lengths, load_HiC
from ..embedding import pairwise_distances, MDS, tSNE, PHATE, SpectralEmbedding, PCA
from .processing_utils import matrix_operation
# from ..analysis import scatter
from ..analysis import kmeans, spectral_clustering, HAC
import matplotlib.pyplot as plt
import seaborn as sns
class scHiCs:
def __init__(self, list_of_files, reference_genome, resolution,
adjust_resolution=True, sparse=False, chromosomes='all', downsample_depth=None, read_depths=None,
format='customized', keep_n_strata=10, strata_offset=0, exclusive_strata=False, store_full_map=False, strata_downsample=None, strata_n_depth=None,
operations=None, header=0, customized_format=None,
map_filter=0., gzip=False, **kwargs):
"""
Parameters
----------
list_of_files : list
List of HiC file paths.
reference_genome : str or dict
Now supporting 'mm9', 'mm10', 'hg19', 'hg38',
if using other references,you can simply provide the chromosome name
and corresponding size (bp) with a dictionary in Python.
e.g. {'chr1': 150000000, 'chr2': 130000000, 'chr3': 200000000}
resolution : int
The resolution to separate genome into bins. If using .hic file format,
the given resolution must match with the resolutions in .hic file.
adjust_resolution : bool, optional
Whether to adjust resolution for input file.
Sometimes the input file is already in the proper resolution
(e.g. position 3000000 has already been changed to 6 in 500kb resolution),
then you can set `adjust_resolution=False`. The default is True.
sparse : bool, optional
Whether to use sparse matrix to store (only effective when max_distance=None). The default is False.
chromosomes : list or str, optional
Chromosomes to use,
eg. ['chr1', 'chr2'], or just 'except Y', 'except XY','all',
which means chr 1-19 + XY for mouse and chr 1-22 + XY for human.
The default is 'all'.
format : str, optional
HiC files' format.
e.g., '.hic', 'customized', '.cool'. The default is 'customized'.
keep_n_strata : int, optional
Only consider contacts within this genomic distance.
If `None`, it will store full matrices in numpy matrix or
scipy sparse format, which will use too much memory sometimes.
The default is 10.
store_full_map : bool, optional
Whether store contact maps. The default is False.
operations : list, optional
            The methods used for pre-processing or smoothing the maps, given as a list.
The operations will happen in the given order.
Operations: 'convolution', 'random_walk', 'network_enhancing'.
For pre-processing and smoothing operations, sometimes you need additional arguments.
You can check docstrings for pre-processing and smoothing for more information.
The default is None.
header : int, optional
The number of header line(s).
If `header=0`, HiC files do not have header.
The default is 0.
customized_format : int or list, optional
Format for each line. The default is None.
map_filter : float, optional
The threshold to filter some reads by map quality.
The default is 0..
gzip : bool, optional
            Whether the HiC files are gzip-compressed; if `True`, files are opened with gzip.
The default is False.
**kwargs :
Other arguments specify smoothing methods passed to function.
See `scHiCTools.load.processing_utils.matrix_operation` function.
Returns
-------
None.
"""
self.resolution = resolution
self.chromosomes, self.chromosome_lengths = get_chromosome_lengths(reference_genome, chromosomes, resolution)
self.num_of_cells = len(list_of_files)
self.sparse = sparse
self.downsample_depth = downsample_depth
self.keep_n_strata = keep_n_strata
self.strata_offset = strata_offset
self.strata_downsample = strata_downsample
self.strata_n_depth = strata_n_depth
self.exclusive_strata = exclusive_strata
self.contacts=np.array([0]*len(list_of_files))
self.short_range=np.array([0]*len(list_of_files))
self.mitotic=np.array([0]*len(list_of_files))
self.files=list_of_files
self.strata = None
if keep_n_strata:
if np.any(np.array([self.chromosome_lengths[ch] for ch in self.chromosomes]) <= keep_n_strata):
keep_n_strata = None
store_full_map = True
else:
self.strata = {
ch: [np.zeros((self.num_of_cells, self.chromosome_lengths[ch] - i - self.strata_offset)) for i in range(keep_n_strata)]
for ch in self.chromosomes}
self.full_maps = None
self.store_full_map = store_full_map
self.similarity_method=None
self.distance=None
assert keep_n_strata is not None or store_full_map is True
assert downsample_depth is None or strata_downsample is None
#assert downsample_depth is None or read_depths is not None # if given downsample depth, we need to pass read depths of each cell
if not store_full_map:
self.full_maps = None
elif sparse:
self.full_maps = {ch: [None] * self.num_of_cells for ch in self.chromosomes}
else:
self.full_maps = {
ch: np.zeros((self.num_of_cells, self.chromosome_lengths[ch], self.chromosome_lengths[ch]))
for ch in self.chromosomes}
print('Preprocessing cells...', operations)
        if read_depths is None:
            # read depths are only needed for depth-based downsampling;
            # default to zeros so the zip() below still works without them
            read_depths = [0] * len(list_of_files)
        for idx, (file, depth) in tqdm(enumerate(zip(list_of_files, read_depths)), total=len(list_of_files)):
#print('Processing {0} out of {1} files: {2}'.format(idx+1,len(list_of_files),file))
            if self.downsample_depth is not None:
                # guard against cells with no recorded reads to avoid a zero division
                downsample_percent = self.downsample_depth / depth if depth > 0 else 0
for ch in self.chromosomes:
if ('ch' in ch) and ('chr' not in ch):
ch=ch.replace("ch", "chr")
mat, strata = load_HiC(
file, genome_length=self.chromosome_lengths,
format=format, custom_format=customized_format,
header=header, chromosome=ch, resolution=resolution,
resolution_adjust=adjust_resolution,
map_filter=map_filter, sparse=sparse, gzip=gzip,
strata_offset=self.strata_offset, strata_n_depth=self.strata_n_depth,
keep_n_strata=keep_n_strata, operations=operations,
**kwargs)
self.contacts[idx]+=np.sum(mat)/2+ np.trace(mat)/2
self.short_range[idx]+=sum([np.sum(mat[i,i:i+int(2000000/self.resolution)]) for i in range(len(mat))])
self.mitotic[idx]+=sum([np.sum(mat[i,i+int(2000000/self.resolution):i+int(12000000/self.resolution)]) for i in range(len(mat))])
if store_full_map:
if self.downsample_depth is not None: # downsample all cells to uniform read depth
if depth == 0: # no reads, skip downsampling
self.full_maps[ch][idx] = mat
else:
chr_depth = np.sum(mat)
if chr_depth > 0:
chr_downsample_depth = int(downsample_percent * chr_depth)
flat_mat = np.squeeze(mat.ravel())
p1 = flat_mat / chr_depth
d1 = np.random.choice(np.arange(0, mat.size), size=chr_downsample_depth, replace=True, p=p1)
#d2 = np.random.choice(np.arange(0, mat.shape[0]), size=chr_downsample_depth, replace=True, p=np.sum(p1, axis=0))
new_mat = np.bincount(d1, minlength=mat.size) # count each bin to compute new downsampled stratum
#new_mat = np.zeros(mat.size)
#for i in range(chr_downsample_depth):
# new_mat[d1[i]] += 1 #new_mat[d1[i], d2[i]] + 1
new_mat = np.nan_to_num(new_mat)
new_mat = np.reshape(new_mat, mat.shape)
new_mat = new_mat + new_mat.T # force symmetry
self.full_maps[ch][idx] = new_mat
else:
self.full_maps[ch][idx] = mat
else:
self.full_maps[ch][idx] = mat
if keep_n_strata:
# self.strata[ch][idx] = strata
if self.downsample_depth is not None:
chr_depth = 0
for strata_idx, stratum in enumerate(strata):
chr_depth += np.sum(stratum)
for strata_idx, stratum in enumerate(strata):
old_count = np.sum(stratum)
if old_count == 0:
self.strata[ch][strata_idx][idx, :] = stratum
continue
new_count = int(old_count * downsample_percent)
probs = np.array(stratum) / old_count
sampled_i = np.random.choice(np.arange(0, stratum.size), size=new_count, replace=True, p=probs)
new_stratum = np.zeros_like(stratum)
for i in sampled_i:
new_stratum[i] += 1
self.strata[ch][strata_idx][idx, :] = new_stratum
else:
for strata_idx, stratum in enumerate(strata):
if self.strata_downsample is not None:
if (strata_idx) in self.strata_downsample.keys():
downsample_percent = self.strata_downsample[strata_idx]
old_count = np.sum(stratum)
if old_count == 0:
self.strata[ch][strata_idx][idx, :] = stratum
continue
new_count = int(old_count * downsample_percent)
probs = np.array(stratum) / old_count
sampled_i = np.random.choice(np.arange(0, stratum.size), size=new_count, replace=True, p=probs)
new_stratum = np.zeros_like(stratum)
for i in sampled_i:
new_stratum[i] += 1
self.strata[ch][strata_idx][idx, :] = new_stratum
else:
self.strata[ch][strata_idx][idx, :] = stratum
else:
self.strata[ch][strata_idx][idx, :] = stratum
def cal_strata(self, n_strata):
"""
Alter the number of strata kept in a `scHiCs` object.
Parameters
----------
n_strata : int
Number of strata to keep.
Returns
-------
dict
Strata of cells.
"""
if self.full_maps is None:
if self.exclusive_strata:
#print('Keeping exclusively %d strata' % self.keep_n_strata)
return deepcopy({ch: self.strata[ch][n_strata - 1:n_strata] for ch in self.chromosomes})
if self.keep_n_strata <= n_strata:
#print(' Only {0} strata are kept!'.format(self.keep_n_strata))
return deepcopy(self.strata)
else:
return deepcopy({ch: self.strata[ch][:n_strata] for ch in self.chromosomes})
else:
if self.keep_n_strata is None:
new_strata = {
ch: [np.zeros((self.num_of_cells, self.chromosome_lengths[ch] - i))
for i in range(n_strata)] for ch in self.chromosomes}
for ch in self.chromosomes:
for idx in range(self.num_of_cells):
fmap = self.full_maps[ch][idx].toarray() if self.sparse else self.full_maps[ch][idx]
for i in range(n_strata):
                        # slice with len(fmap) - i so the i == 0 stratum is handled correctly
                        new_strata[ch][i][idx, :] = np.diag(fmap[i:, :len(fmap) - i])
return new_strata
elif self.keep_n_strata >= n_strata:
return deepcopy({ch: self.strata[ch][:n_strata] for ch in self.chromosomes})
else:
for ch in self.chromosomes:
                    self.strata[ch] += [np.zeros((self.num_of_cells, self.chromosome_lengths[ch] - i))
for i in range(self.keep_n_strata, n_strata)]
for idx in range(self.num_of_cells):
fmap = self.full_maps[ch][idx].toarray() if self.sparse else self.full_maps[ch][idx]
for i in range(self.keep_n_strata, n_strata):
self.strata[ch][i][idx, :] = np.diag(fmap[i:, :-i])
return deepcopy(self.strata)
def processing(self, operations, **kwargs):
"""
Apply a smoothing method to contact maps.
Requre the `scHiCs` object to store the full map of contacts maps.
Parameters
----------
operations : str
            The methods used for smoothing the maps.
            Available operations: 'convolution', 'random_walk', 'network_enhancing'.
**kwargs :
Other arguments specify smoothing methods passed to function.
See function `scHiCTools.load.processing_utils.matrix_operation`.
Returns
-------
None.
"""
if self.full_maps is None:
raise ValueError('No full maps stored. Processing is not doable.')
if self.sparse:
for ch in self.chromosomes:
for i, mat in enumerate(self.full_maps[ch]):
self.full_maps[ch][i] = coo_matrix(matrix_operation(mat.toarray(), operations, **kwargs))
else:
for ch in self.chromosomes:
for i, mat in enumerate(self.full_maps[ch]):
self.full_maps[ch][i, :, :] = matrix_operation(mat, operations, **kwargs)
# Update the strata
if self.keep_n_strata is not None:
for ch in self.chromosomes:
for i, mat in enumerate(self.full_maps[ch]):
for j in range(self.keep_n_strata):
self.strata[ch][j][i, :] = np.diag(mat[j:, :len(mat) - j])
def plot_contacts(self, hist=True, percent=True,
size=1.0, bins=10, color='#1f77b4'):
"""
Generate two plots:
Histogram of contacts and
scatter plot of short-range contacts v.s. contacts at the mitotic band.
Parameters
----------
hist : bool, optional
Whether to plot Histogram of contacts.
If `True`, plot Histogram of contacts.
The default is True.
        percent : bool, optional
Whether to plot scatter plot of short-range contacts v.s. contacts at the mitotic band.
If `True`, plot scatter plot of short-range contacts v.s. contacts at the mitotic band.
The default is True.
size : float, optional
The point size of scatter plot.
The default is 1.0.
bins : int, optional
Number of bins in histogram.
The default is 10.
color : str, optional
The color of the plot.
The default is '#1f77b4'.
Returns
-------
None.
"""
if hist:
if percent:
plt.subplot(1,2,1)
plt.hist(self.contacts,bins=bins,color=color)
plt.xlabel("Number of contacts")
plt.ylabel('Frequency')
plt.title('Histogram of contacts')
if percent:
if hist:
plt.subplot(1,2,2)
plt.scatter(self.mitotic*100/self.contacts,self.short_range*100/self.contacts, s=size, c=color)
plt.xlabel("% Mitotic contacts")
plt.ylabel("% Short-range contacts")
plt.title('Short-range contacts v.s. contacts at the mitotic band')
def select_cells(self, min_n_contacts=0,max_short_range_contact=1):
"""
        Select qualifying cells based on a minimum number of contacts and
        a maximum proportion of short-range contacts.
Parameters
----------
min_n_contacts : int, optional
The threshold of minimum number of contacts in each cell.
The default is 0.
max_short_range_contact : float, optional
The threshold of maximum proportion of short range contact in every cell.
The default is 1.
Returns
-------
list
Selected files.
"""
files=np.array(self.files)
selected=np.logical_and(self.short_range/self.contacts<=max_short_range_contact,self.contacts>=min_n_contacts)
self.num_of_cells=sum(selected)
self.files=[self.files[i] for i in range(len(files)) if selected[i]]
self.contacts=self.contacts[selected]
self.short_range=self.short_range[selected]
self.mitotic=self.mitotic[selected]
if self.strata is not None:
for ch in self.chromosomes:
self.strata[ch]=[self.strata[ch][i] for i in np.arange(len(selected))[selected]]
if self.full_maps is not None:
for ch in self.chromosomes:
self.full_maps[ch]=self.full_maps[ch][selected]
if self.distance is not None:
self.distance=self.distance[:,selected,:][:,:,selected]
return files[selected]
def graph_distance(self,dim=2,n_clusters=4,cutoff=0.8,n_PCs=10,**kwargs):
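        """
        Embed and cluster cells by comparing the sparse incidence matrices of each
        cell's per-chromosome contact graph (Frobenius norm of the difference),
        averaging the distances over chromosomes, then running MDS and k-means.
        Returns the first `dim` MDS coordinates and the cluster labels.
        """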
if self.full_maps is None:
raise ValueError('No full maps stored. scHiCluster is not doable.')
d = None
for ch in self.chromosomes:
cells = self.full_maps[ch].copy()
n_cells = cells.shape[0]
chr_d = np.zeros((n_cells, n_cells))
specs = {}
for i in range(n_cells):
for j in range(i, n_cells):
if i in specs.keys():
spec1 = specs[i]
else:
g1 = nx.from_numpy_matrix(cells[i])
spec1 = nx.incidence_matrix(g1)
spec1 = csr_matrix(spec1)
specs[i] = spec1
if j in specs.keys():
spec2 = specs[j]
else:
g2 = nx.from_numpy_matrix(cells[j])
spec2 = nx.incidence_matrix(g2)
spec2 = csr_matrix(spec2)
                    specs[j] = spec2
chr_d[i][j] = scipy.sparse.linalg.norm(spec1 - spec2)
chr_d[j][i] = chr_d[i][j]
print(i, '/', n_cells, chr_d[i][-1])
if d is None:
d = chr_d
else:
d += chr_d
d /= len(self.chromosomes)
mds = sklearn_MDS(n_PCs, dissimilarity='precomputed')
X = mds.fit_transform(d)
label=kmeans(X,n_clusters,kwargs.pop('weights',None),kwargs.pop('iteration',1000))
return X[:,:dim], label
def graph__lap_distance(self,dim=2,n_clusters=4,cutoff=0.8,n_PCs=10,**kwargs):
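        """
        Same idea as `graph_distance`, but compares the normalized graph Laplacians
        of the per-chromosome contact maps instead of incidence matrices.
        Returns the first `dim` MDS coordinates and the cluster labels.
        """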
if self.full_maps is None:
raise ValueError('No full maps stored. scHiCluster is not doable.')
d = None
for ch in self.chromosomes:
cells = self.full_maps[ch].copy()
n_cells = cells.shape[0]
chr_d = np.zeros((n_cells, n_cells))
specs = {}
for i in range(n_cells):
print(i, '/', n_cells)
for j in range(i, n_cells):
if i in specs.keys():
spec1 = specs[i]
else:
spec1 = csgraph.laplacian(cells[i], normed=True)#nx.laplacian_spectrum(graph1)
spec1 = csr_matrix(spec1)
specs[i] = spec1
if j in specs.keys():
spec2 = specs[j]
else:
spec2 = csgraph.laplacian(cells[j], normed=True)#nx.laplacian_spectrum(graph1)
spec2 = csr_matrix(spec2)
specs[j] = spec2
chr_d[i][j] = scipy.sparse.linalg.norm(spec1 - spec2)
chr_d[j][i] = chr_d[i][j]
if d is None:
d = chr_d
else:
d += chr_d
d /= len(self.chromosomes)
mds = sklearn_MDS(n_PCs, dissimilarity='precomputed')
X = mds.fit_transform(d)
label=kmeans(X,n_clusters,kwargs.pop('weights',None),kwargs.pop('iteration',1000))
return X[:,:dim], label
def scHiCluster(self,dim=2,n_clusters=4,cutoff=0.8,n_PCs=10,**kwargs):
"""
Embedding and clustering single cells using HiCluster.
Reference:
Zhou J, Ma J, Chen Y, Cheng C, Bao B, Peng J, et al.
Robust single-cell Hi-C clustering by convolution- and random-walk–based imputation.
PNAS. 2019 Jul 9;116(28):14011–8.
Parameters
----------
dim : int, optional
Number of dimension of embedding. The default is 2.
n_clusters : int, optional
Number of clusters. The default is 4.
cutoff : float, optional
The cutoff proportion to convert the real contact
matrix into binary matrix. The default is 0.8.
n_PCs : int, optional
Number of principal components. The default is 10.
**kwargs :
Other arguments passed to kmeans.
See `scHiCTools.analysis.clustering.kmeans` function.
Returns
-------
embeddings : numpy.ndarray
The embedding of cells using HiCluster.
label : numpy.ndarray
An array of cell labels clustered by HiCluster.
"""
if self.full_maps is None:
raise ValueError('No full maps stored. scHiCluster is not doable.')
def kth_diag_indices(a, k):
rows, cols = np.diag_indices_from(a)
if k < 0:
return rows[-k:], cols[:k]
elif k > 0:
return rows[:-k], cols[k:]
else:
return rows, cols
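        # e.g. kth_diag_indices(np.zeros((4, 4)), 1) -> (array([0, 1, 2]), array([1, 2, 3]))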
X=None
for ch in tqdm(self.chromosomes):
all_strata = self.full_maps[ch].copy()
if self.keep_n_strata is None:
#print('HiCluster processing chromosomes {}'.format(ch))
A = all_strata
elif self.keep_n_strata >= all_strata.shape[-1]:
#print('HiCluster processing chromosomes (no strata to filter) {}'.format(ch))
A = all_strata
else:
#print('HiCluster processing chromosomes and filtering strata {}'.format(ch))
A = np.zeros_like(all_strata)
for cell_i in range(A.shape[0]):
cell_A = all_strata[cell_i]
for k in range(self.strata_offset, self.keep_n_strata):
#k += self.strata_offset
strata_rows, strata_cols = kth_diag_indices(cell_A, k)
s = np.diag(cell_A, k=k)
if self.strata_downsample is not None:
if k in self.strata_downsample.keys():
downsample_percent = self.strata_downsample[k]
old_count = np.sum(s)
if old_count == 0:
continue
new_count = int(old_count * downsample_percent)
probs = np.array(s) / old_count
if np.any(np.isnan(probs)):
A[cell_i, strata_rows, strata_cols] = s
A[cell_i, strata_cols, strata_rows] = s
else:
sampled_i = np.random.choice(np.arange(0, s.size), size=new_count, replace=True, p=probs)
new_stratum = np.zeros_like(s)
for i in sampled_i:
new_stratum[i] += 1
A[cell_i, strata_rows, strata_cols] = new_stratum
A[cell_i, strata_cols, strata_rows] = new_stratum
else:
A[cell_i, strata_rows, strata_cols] = s
A[cell_i, strata_cols, strata_rows] = s
else:
A[cell_i, strata_rows, strata_cols] = s
A[cell_i, strata_cols, strata_rows] = s
if len(A.shape)==3:
n=A.shape[1]*A.shape[2]
A.shape=(A.shape[0],n)
A=np.quantile(A,cutoff,axis=1)<np.transpose(A)
#A = (A - np.mean(A, axis=1)) / np.std(A, axis=1) # standardize inputs
A = PCA(A.T,n_PCs)
if X is None:
X=A
else:
X=np.append(X, A, axis=1)
X=PCA(X,n_PCs)
X = np.nan_to_num(X)
try:
label=kmeans(X,n_clusters,kwargs.pop('weights',None),kwargs.pop('iteration',1000))
except ValueError:
print('NaN probabilities found when running K-means...')
label = np.zeros(X.shape[0])
return X[:,:dim], label
def learn_embedding(self, similarity_method, embedding_method,
dim=2, aggregation='median', n_strata=None, return_distance=False, print_time=False, distance_matrix_viz=None, row_colors=None,
**kwargs):
"""
Function to find a low-dimensional embedding for cells.
Parameters
----------
similarity_method : str
The method used to calculate similarity matrix.
Now support 'inner_product', 'HiCRep' and 'Selfish'.
embedding_method : str
The method used to project cells into lower-dimensional space.
Now support 'MDS', 'tSNE', 'phate', 'spectral_embedding'.
dim : int, optional
Dimension of the embedding space.
The default is 2.
aggregation : str, optional
Method to find the distance matrix based on distance matrices of chromesomes.
Must be 'mean' or 'median'.
The default is 'median'.
n_strata : int, optional
Number of strata used in calculation.
The default is None.
return_distance : bool, optional
Whether to return the distance matrix of cells.
If True, return (embeddings, distance_matrix);
if False, only return embeddings.
The default is False.
print_time : bool, optional
Whether to print process time. The default is False.
**kwargs :
Including two arguments for Selfish
            (see function `pairwise_distances`):\
`n_windows`: number of Selfish windows\
`sigma`: sigma in the Gaussian-like kernel\
and some arguments specify different embedding method
(see functions in `scHiCTools.embedding.embedding`).
Returns
-------
embeddings: numpy.ndarray
The embedding of cells in lower-dimensional space.
final_distance: numpy.ndarray, optional
The pairwise distance calculated.
"""
if self.distance is None or self.similarity_method!=similarity_method:
self.similarity_method=similarity_method
distance_matrices = []
assert embedding_method.lower() in ['mds', 'tsne', 'umap', 'phate', 'spectral_embedding']
if not self.store_full_map:
assert n_strata is not None or self.keep_n_strata is not None
n_strata = n_strata if n_strata is not None else self.keep_n_strata
new_strata = self.cal_strata(n_strata)
#print('Strata only')
else:
#print('Full map')
n_strata = n_strata if n_strata is not None else self.keep_n_strata
new_strata = self.cal_strata(n_strata)
#new_strata = self.strata=
if print_time:
time1=0
time2=0
for ch in tqdm(self.chromosomes):
distance_mat,t1,t2 = pairwise_distances(new_strata[ch], similarity_method, print_time, kwargs.get('sigma',.5), kwargs.get('window_size',10))
time1=time1+t1
time2=time2+t2
distance_matrices.append(distance_mat)
print('Sum of time 1:', time1)
print('Sum of time 2:', time2)
else:
for ch in tqdm(self.chromosomes):
if ch is not None and new_strata is not None:
distance_mat = pairwise_distances(new_strata[ch],
similarity_method,
print_time,
kwargs.get('sigma',.5),
kwargs.get('window_size',10))
distance_matrices.append(distance_mat)
self.distance = np.array(distance_matrices)
if aggregation == 'mean':
final_distance = np.mean(self.distance, axis=0)
elif aggregation == 'median':
final_distance = np.median(self.distance, axis=0)
else:
raise ValueError('Aggregation method {0} not supported. Only "mean" or "median".'.format(aggregation))
np.fill_diagonal(final_distance, 0)
if distance_matrix_viz is not None:
im = plt.matshow(final_distance, cmap='Blues')
plt.colorbar(im)
plt.savefig(distance_matrix_viz + '.png')
plt.close()
lk = linkage(final_distance, method='average')
sns.clustermap(final_distance, row_linkage=lk, col_linkage=lk, row_colors=row_colors)
plt.savefig(distance_matrix_viz + '_cluster.png')
plt.close()
embedding_method = embedding_method.lower()
if embedding_method == 'mds':
embeddings = MDS(final_distance, dim)
elif embedding_method == 'tsne':
embeddings = tSNE(final_distance, dim,
kwargs.pop('perp',30),
kwargs.pop('iteration',1000),
kwargs.pop('momentum', 0.5),
kwargs.pop('rate', 200),
kwargs.pop('tol',1e-5))
# elif embedding_method == 'umap':
# embeddings = UMAP(final_distance, dim,
# kwargs.pop('n',5),
# kwargs.pop('min_dist',1),
# kwargs.pop('n_epochs',10),
# kwargs.pop('alpha',1),
# kwargs.pop('n_neg_samples',0))
elif embedding_method == 'phate':
embeddings = PHATE(final_distance, dim,
kwargs.pop('k',5),
kwargs.pop('a',1),
kwargs.pop('gamma',1),
kwargs.pop('t_max',100),
kwargs.pop('momentum',.1),
kwargs.pop('iteration',1000))
elif embedding_method == 'spectral_embedding':
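            # Turn the pairwise distances into a Gaussian-like affinity graph
            # (diagonal zeroed so cells have no self-edges) before computing
            # the spectral embedding.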
graph=np.exp(-np.square(final_distance)/np.mean(final_distance**2))
graph = graph-np.diag(graph.diagonal())
embeddings = SpectralEmbedding(graph, dim)
else:
raise ValueError('Embedding method {0} not supported. '.format(embedding_method))
if return_distance:
return embeddings, final_distance
else:
return embeddings
def clustering(self,
n_clusters,
clustering_method,
similarity_method,
aggregation='median',
n_strata=None,
print_time=False,
**kwargs):
"""
Parameters
----------
n_clusters : int
Number of clusters.
clustering_method : str
Clustering method in 'kmeans', 'spectral_clustering' or 'HAC'(hierarchical agglomerative clustering).
similarity_method : str
Reproducibility measure.
            Value in 'InnerProduct', 'HiCRep' or 'Selfish'.
aggregation : str, optional
Method to aggregate different chromosomes.
Value is either 'mean' or 'median'.
The default is 'median'.
n_strata : int or None, optional
Only consider contacts within this genomic distance.
            If it is None, all strata kept from the previous loading process are used.
The default is None.
print_time : bool, optional
Whether to print the processing time. The default is False.
**kwargs :
            Other arguments passed to the function `scHiCTools.embedding.reproducibility.pairwise_distances`,
and the clustering function in `scHiCTools.analysis.clustering`.
Returns
-------
label : numpy.ndarray
            An array of cluster labels, one per cell.
"""
if self.distance is None or self.similarity_method!=similarity_method:
self.similarity_method=similarity_method
distance_matrices = []
assert n_strata is not None or self.keep_n_strata is not None
n_strata = n_strata if n_strata is not None else self.keep_n_strata
new_strata = self.cal_strata(n_strata)
for ch in tqdm(self.chromosomes):
distance_mat = pairwise_distances(new_strata[ch],
similarity_method,
print_time,
kwargs.get('sigma',.5),
kwargs.get('window_size',10))
distance_matrices.append(distance_mat)
self.distance = np.array(distance_matrices)
if aggregation == 'mean':
final_distance = np.mean(self.distance, axis=0)
elif aggregation == 'median':
final_distance = np.median(self.distance, axis=0)
else:
raise ValueError('Aggregation method {0} not supported. Only "mean" or "median".'.format(aggregation))
np.fill_diagonal(final_distance, 0)
clustering_method=clustering_method.lower()
if clustering_method=='kmeans':
embeddings = MDS(final_distance, n_clusters)
label=kmeans(embeddings,
k=n_clusters,
**kwargs)
elif clustering_method=='spectral_clustering':
label=spectral_clustering(final_distance,
data_type='distance_matrix',
n_clusters=n_clusters,
**kwargs)
elif clustering_method=='hac':
label=HAC(final_distance,
'distance_matrix',
n_clusters,
kwargs.pop('method','centroid'))
else:
            raise ValueError('Clustering method {0} not supported. '.format(clustering_method))
return label
|
the-stack_0_18653 | #
# Copyright 2014 Thomas Rabaix <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ioc.loader, ioc.component, ioc.exceptions
from ioc.component import Definition
class Extension(ioc.component.Extension):
def load(self, config, container_builder):
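        # For every registered extension, build a ChoiceLocator that looks for
        # resources first under <project root>/resources/<extension> and then
        # inside the extension package's own `resources` directory; expose the
        # whole map through one PrefixLocator addressed as "<extension>:<path>".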
extensions = container_builder.parameters.get('ioc.extensions')
locator_map = {}
for extension in extensions:
locator_map[extension] = Definition('ioc.locator.ChoiceLocator',
arguments=[[
Definition('ioc.locator.FileSystemLocator', arguments=["%s/resources/%s" % (container_builder.parameters.get('project.root_folder'), extension)]),
Definition('ioc.locator.PackageLocator', arguments=[extension], kwargs={'package_path': 'resources'})
]]
)
container_builder.add('ioc.locator', Definition('ioc.locator.PrefixLocator', arguments=[locator_map], kwargs={'delimiter': ':'}))
|
the-stack_0_18654 | import os
import unittest
import shutil
import sys
import tempfile
import bpy
import addon_utils
class BaseTestCase(unittest.TestCase):
__save_test_data = '--save-test-data' in sys.argv
__tmp_base = os.path.join(tempfile.gettempdir(), 'animation_retarget-tests')
__tmp = __tmp_base + '/out'
@classmethod
def outpath(cls, path=''):
if not os.path.exists(cls.__tmp):
os.makedirs(cls.__tmp)
return os.path.join(cls.__tmp, path)
def setUp(self):
self._reports = []
bpy.ops.wm.read_homefile()
addon_utils.enable('animation_retarget', default_set=True)
def tearDown(self):
if os.path.exists(self.__tmp):
if self.__save_test_data:
bpy.ops.wm.save_mainfile(filepath=os.path.join(self.__tmp, 'result.blend'))
new_path = os.path.join(
self.__tmp_base,
self.__class__.__name__,
self._testMethodName
)
os.renames(self.__tmp, new_path)
else:
shutil.rmtree(self.__tmp)
addon_utils.disable('animation_retarget')
|
the-stack_0_18655 | import logging
import os.path
import pathlib
import time
from functools import wraps
import numpy as np
import ophyd.sim
import pydm
import pytest
import qtpy
from happi import Client
from ophyd import Component as Cpt
from ophyd import Device
from ophyd import FormattedComponent as FC
from ophyd.sim import Signal, SynAxis, SynPeriodicSignal
from pydm import PyDMApplication
from pydm.data_plugins import plugin_for_address
from pydm.widgets.logdisplay import GuiHandler
from qtpy import QtGui, QtWidgets
import typhos
from typhos.plugins.core import signal_registry
from typhos.plugins.happi import register_client
from typhos.utils import SignalRO, TyphosBase
logger = logging.getLogger(__name__)
# Global testing variables
show_widgets = False
application = None
MODULE_PATH = pathlib.Path(__file__).parent
# Patch TyphosConsole on TyphosSuite. Creation of more than one QtConsole
# quicky in the test suite causes instabilities
typhos.TyphosSuite.default_tools['Console'] = TyphosBase
def pytest_addoption(parser):
parser.addoption("--dark", action="store_true", default=False,
help="Use the dark stylesheet to display widgets")
parser.addoption("--show-ui", action="store_true", default=False,
help="Show the widgets produced by each test")
# Create a fixture to configure whether widgets are shown or not
@pytest.fixture(scope='session', autouse=True)
def _show_widgets(pytestconfig):
global show_widgets
show_widgets = pytestconfig.getoption('--show-ui')
if show_widgets:
logger.info("Running tests while showing created widgets ...")
@pytest.fixture(scope='session', autouse=True)
def qapp(pytestconfig):
global application
application = QtWidgets.QApplication.instance()
if application is None:
application = PyDMApplication(use_main_window=False)
typhos.use_stylesheet(pytestconfig.getoption('--dark'))
return application
@pytest.fixture(scope='function', autouse=True)
def noapp(monkeypatch):
monkeypatch.setattr(QtWidgets.QApplication, 'exec_', lambda x: 1)
monkeypatch.setattr(QtWidgets.QApplication, 'exit', lambda x: 1)
monkeypatch.setattr(
pydm.exception, 'raise_to_operator', lambda *_, **__: None
)
@pytest.fixture(scope='session')
def test_images():
return (os.path.join(os.path.dirname(__file__), 'utils/lenna.png'),
os.path.join(os.path.dirname(__file__), 'utils/python.png'))
def save_image(widget, name, delay=0.5):
'''
Save `widget` to typhos/tests/artifacts/{name}.png after `delay` seconds.
'''
widget.show()
app = QtWidgets.QApplication.instance()
end_time = time.time() + delay
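    # Pump the Qt event loop for `delay` seconds so the widget has a chance
    # to lay out and paint itself before it is captured below.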
while time.time() < end_time:
app.processEvents()
time.sleep(0.1)
image = QtGui.QImage(widget.width(), widget.height(),
QtGui.QImage.Format_ARGB32_Premultiplied)
image.fill(qtpy.QtCore.Qt.transparent)
pixmap = QtGui.QPixmap(image)
painter = QtGui.QPainter(pixmap)
widget.render(image)
painter.end()
artifacts_path = MODULE_PATH / 'artifacts'
artifacts_path.mkdir(exist_ok=True)
path = str(artifacts_path / f'{name}.png')
image.save(path)
logger.debug('saved image to %s', path)
def show_widget(func):
"""
Show a widget returned from arbitrary `func`
"""
@wraps(func)
def func_wrapper(*args, **kwargs):
# Run function grab widget
widget = func(*args, **kwargs)
if widget is not None:
save_image(widget, func.__name__)
if show_widgets:
# Display the widget
widget.show()
# Start the application
application.exec_()
return func_wrapper
@pytest.fixture(scope='session')
def motor():
# Register all signals
for sig in ophyd.sim.motor.component_names:
typhos.register_signal(getattr(ophyd.sim.motor, sig))
return ophyd.sim.motor
class RichSignal(Signal):
def __init__(self, *args, metadata=None, **kwargs):
if metadata is None:
metadata = {
'enum_strs': ('a', 'b', 'c'),
'precision': 2,
'units': 'urad',
}
super().__init__(*args, metadata=metadata, **kwargs)
def describe(self):
desc = super().describe()
desc[self.name].update(self.metadata)
return desc
def update_metadata(self, md):
self._metadata.update(md)
self._run_metadata_callbacks()
class DeadSignal(Signal):
subscribable = False
def subscribe(self, *args, **kwargs):
if self.subscribable:
pass
else:
raise TimeoutError("Timeout on subscribe")
def get(self, *args, **kwargs):
raise TimeoutError("Timeout on get")
def describe(self, *args, **kwargs):
raise TimeoutError("Timeout on describe")
class ConfiguredSynAxis(SynAxis):
velocity = Cpt(Signal, value=100, kind='normal')
acceleration = Cpt(Signal, value=10, kind='normal')
resolution = Cpt(Signal, value=5, kind='normal')
class RandomSignal(SynPeriodicSignal):
"""
Signal that randomly updates a random integer
"""
def __init__(self, *args, **kwargs):
super().__init__(func=lambda: np.random.uniform(0, 100),
period=10, period_jitter=4, **kwargs)
class MockDevice(Device):
# Device signals
readback = Cpt(RandomSignal, kind='normal')
noise = Cpt(RandomSignal, kind='normal')
transmorgifier = Cpt(SignalRO, value=4, kind='normal')
setpoint = Cpt(Signal, value=0, kind='normal')
velocity = Cpt(Signal, value=1, kind='config')
flux = Cpt(RandomSignal, kind='config')
modified_flux = Cpt(RandomSignal, kind='config')
capacitance = Cpt(RandomSignal, kind='config')
acceleration = Cpt(Signal, value=3, kind='config')
limit = Cpt(Signal, value=4, kind='config')
inductance = Cpt(RandomSignal, kind='normal')
transformed_inductance = Cpt(SignalRO, value=3, kind='omitted')
core_temperature = Cpt(RandomSignal, kind='omitted')
resolution = Cpt(Signal, value=5, kind='omitted')
duplicator = Cpt(Signal, value=6, kind='omitted')
# Component Motors
x = FC(ConfiguredSynAxis, name='X Axis')
y = FC(ConfiguredSynAxis, name='Y Axis')
z = FC(ConfiguredSynAxis, name='Z Axis')
def insert(self, width: float = 2.0, height: float = 2.0,
fast_mode: bool = False):
"""Fake insert function to display"""
pass
def remove(self, height: float, fast_mode: bool = False):
"""Fake remove function to display"""
pass
@property
def hints(self):
return {'fields': [self.name+'_readback']}
@pytest.fixture(scope='function')
def device():
dev = MockDevice('Tst:This', name='Simulated Device')
yield dev
clear_handlers(dev)
def clear_handlers(device):
if isinstance(device.log, logging.Logger):
_logger = device.log
else:
_logger = device.log.logger
for handler in list(_logger.handlers):
if isinstance(handler, GuiHandler):
_logger.handlers.remove(handler)
@pytest.fixture(scope='session')
def client():
client = Client(path=os.path.join(os.path.dirname(__file__),
'happi.json'))
register_client(client)
return client
@pytest.fixture(scope='session')
def happi_cfg():
path = str(MODULE_PATH / 'happi.cfg')
os.environ['HAPPI_CFG'] = path
return path
def reset_signal_plugin():
"""
Completely restart the sig:// plugin.
After the restart, there will be no open SignalConnection objects
and nothing in the signal registry.
Some tests are easier to express by repeating signal names, which
will cause the signal plugin to ignore the new devices in favor of
the previously saved devices, which are no longer available to be
manipulated or tested.
"""
signal_registry.clear()
plugin = plugin_for_address('sig://test')
for channel in list(plugin.channels):
plugin.remove_connection(channel)
|
the-stack_0_18656 | #!/usr/bin/env python3
import os, sys, codecs, argparse
try:
import http.server as httpserver, socketserver
except ImportError:
import SimpleHTTPServer as httpserver, SocketServer as socketserver
from .SlidesParser import SlidesParser
from .HandoutParser import HandoutParser
from ._version import __version__ as version
class Rstslide:
@staticmethod
def run():
# Test the presence of Pygments
isPygments = False
try:
from pygments.styles import STYLE_MAP
isPygments = True
except BaseException:
print("Pygments is not installed, code blocks won't be highlighted")
# Allowed themes and transitions
themes = ['default', 'beige', 'night']
transitions = ['default', 'cube', 'page', 'concave', 'zoom', 'linear', 'fade', 'none']
options = ['input_file', 'output_file', 'theme', 'transition', 'mathjax_path', 'pygments_style']
if isPygments:
pygments_styles = STYLE_MAP.keys()
# Define arguments
argparser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
argparser.description = "reStructuredText to HTML slide generator using reveal.js."
argparser.add_argument("input_file", help="The name of the reStructuredText file to parse.")
argparser.add_argument("-o", "--output_file", type=str, help="The name of the HTML file to produce (by default the same basename as the input file with a .html suffix.")
argparser.add_argument("-u", "--update_file", type=str, help="The name of a previously generated HTML file for updating only the headers part.")
argparser.add_argument("-t", "--theme", type=str, help="Set rstslide theme (overrides theme set in input file)")
mode = argparser.add_mutually_exclusive_group(required=False)
mode.add_argument("-S", "--slide", action='store_true', help="Normal slide mode (default).")
mode.add_argument("-P", "--pdf", action='store_true', help="Pdf rendering mode (run and follow instructions).")
mode.add_argument("-N", "--pdf-with-notes", action='store_true', help="Pdf rendering mode with notes (run and follow instructions).")
mode.add_argument("-H", "--handout", action='store_true', help="Handout rendering mode.")
mode.add_argument("-X", "--handout-alt", action='store_true', help="An alternative handout rendering mode that uses the default html5 docutils converter.")
argparser.add_argument("-s", "--serve", action='store_true', help="Start webserver that serves the slides.")
argparser.add_argument('-v', '--version', action='version', version='rstslide ' + version)
argparser.add_argument('-d', '--debug', action='store_true', help="Write debug output on stdout")
resource_mgmt_choices = {
'central': "Use centralized resources from where rstslide is installed",
'local': "Copy needed resources to a directory <outfile>-resources",
'inline': "Embedd all resources into a single file HTML document"
}
argparser.add_argument("-r", "--resources", type=str, choices=resource_mgmt_choices,
help="How to handle resources that the presentation is dependent on:\n" +
'\n'.join("{}: {}".format(key, value) for key, value in resource_mgmt_choices.items()))
args = argparser.parse_args()
# input file name
filename = args.input_file
# output file name
if args.output_file:
output_file = args.output_file
else:
output_file = filename.split('.')[-2]+'.html'
mode = 'slide'
notes = False
if args.pdf or args.pdf_with_notes:
mode = 'print'
notes = True
elif args.handout_alt:
mode = 'handout-alt'
elif args.handout:
mode = 'handout'
if (args.pdf or args.serve) and args.resources is None:
args.resources = 'local'
if (args.pdf or args.serve) and args.resources == 'central':
print("Pdf printing or serving does not work with resource option 'central', exiting.")
exit(1)
# Create the RST parser and create the slides
if mode != 'handout-alt':
parser = SlidesParser(input_file=filename, output_file=output_file, theme=args.theme, resources=args.resources, mode=mode, notes=notes, debug=args.debug)
parser.create_slides()
else:
parser = HandoutParser(input_file=filename, output_file=output_file, theme=args.theme, resources=args.resources, mode=mode, notes=notes, debug=args.debug)
parser.create_handout()
if args.pdf or args.serve:
port = 8000
handler = httpserver.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", port), handler)
if args.serve:
print("The slides are served here:")
print(" http://127.0.0.1:8000/"+output_file+"?print-pdf")
print("To end serving the slides, end the program with Ctrl+C")
else:
print("Follow the following steps to export the slides as pdf:")
print("1. Open the following URL in your web browser:")
print(" http://127.0.0.1:8000/"+output_file+"?print-pdf")
print("2. Wait for everything to load and then print to pdf")
print(" (this is known to work in Google Chrome and similar browsers.)")
print("3. End this program with Ctrl+C")
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
finally:
# Clean-up server (close socket, etc.)
httpd.server_close()
|
the-stack_0_18657 | import os
from setuptools import setup
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
setup(
name="samplify-python",
version="0.0.9.13",
author="Samplify Limited",
author_email="[email protected]",
description="Python wrapper for Samplify API",
license="MIT",
keywords="",
url="https://samplify.io",
packages=['samplify'],
package_data={},
test_suite='',
long_description=read('README.rst'),
install_requires=[
'requests'
],
tests_require=[
],
classifiers=[
],
)
|
the-stack_0_18659 | import os
from os.path import join, dirname
from dotenv import load_dotenv
import vonage
dotenv_path = join(dirname(__file__), "../.env")
load_dotenv(dotenv_path)
VONAGE_APPLICATION_ID = os.environ.get("VONAGE_APPLICATION_ID")
VONAGE_APPLICATION_PRIVATE_KEY_PATH = os.environ.get("VONAGE_APPLICATION_PRIVATE_KEY_PATH")
VONAGE_CALL_UUID = os.environ.get("UUID")
client = vonage.Client(
application_id=VONAGE_APPLICATION_ID,
private_key=VONAGE_APPLICATION_PRIVATE_KEY_PATH
)
voice = vonage.Voice(client)
stream_url = 'https://nexmo-community.github.io/ncco-examples/assets/voice_api_audio_streaming.mp3'
voice.send_audio(VONAGE_CALL_UUID, stream_url=[stream_url])
|
the-stack_0_18661 | """ Simple convolutional neural network using Minpy """
# import minpy
# import minpy.numpy as np
from minpy.core import Function
from minpy.nn import layers
from minpy.nn.model import ModelBase
from minpy.nn.solver import Solver
from minpy.nn.io import NDArrayIter
from examples.utils.data_utils import get_CIFAR10_data
from minpy.context import set_context, gpu
# set_context(gpu(0)) # set the global context as gpu(0)
import mxnet as mx
batch_size = 128
input_size = (3, 32, 32)
flattened_input_size = 3 * 32 * 32
hidden_size = 512
num_classes = 10
class ConvolutionNet(ModelBase):
def __init__(self):
super(ConvolutionNet, self).__init__()
# Define symbols that using convolution and
# max pooling to extract better features
# from input image.
net = mx.sym.Variable(name='X')
net = mx.sym.Convolution(
data=net, name='conv', kernel=(7, 7), num_filter=32)
net = mx.sym.Activation(
data=net, act_type='relu')
net = mx.sym.Pooling(
data=net, name='pool', pool_type='max', kernel=(2, 2),
stride=(2, 2))
net = mx.sym.Flatten(data=net)
# Create forward function and add parameters to this model.
self.conv = Function(
net, input_shapes={'X': (batch_size,) + input_size},
name='conv')
self.add_params(self.conv.get_params())
# Define ndarray parameters used for classification part.
output_shape = self.conv.get_one_output_shape()
conv_out_size = output_shape[1]
self.add_param(name='w1', shape=(conv_out_size, hidden_size)) \
.add_param(name='b1', shape=(hidden_size,)) \
.add_param(name='w2', shape=(hidden_size, num_classes)) \
.add_param(name='b2', shape=(num_classes,))
def forward(self, X, mode):
out = self.conv(X=X, **self.params)
out = layers.affine(out, self.params['w1'], self.params['b1'])
out = layers.relu(out)
out = layers.affine(out, self.params['w2'], self.params['b2'])
return out
def loss(self, predict, y):
return layers.softmax_loss(predict, y)
def main():
# data dir
import os
data_dir = os.path.expandvars('$HOME/data/minpy/cifar-10-batches-py')
# Create model.
model = ConvolutionNet()
# Create data iterators for training and testing sets.
data = get_CIFAR10_data(data_dir)
train_dataiter = NDArrayIter(data=data['X_train'],
label=data['y_train'],
batch_size=batch_size,
shuffle=True)
test_dataiter = NDArrayIter(data=data['X_test'],
label=data['y_test'],
batch_size=batch_size,
shuffle=False)
# Create solver.
solver = Solver(model,
train_dataiter,
test_dataiter,
num_epochs=10,
init_rule='gaussian',
init_config={
'stdvar': 0.001
},
# automatically does the backpropagation
update_rule='sgd_momentum',
optim_config={
'learning_rate': 1e-3,
'momentum': 0.9
},
verbose=True,
print_every=20)
# Initialize model parameters.
solver.init()
# Train!
solver.train()
if __name__ == '__main__':
main()
|
the-stack_0_18662 | from __future__ import absolute_import
from __future__ import print_function
from typing import cast
import sys
import unittest
try:
from tools.lib.css_parser import (
CssParserException,
CssSection,
parse,
)
except ImportError:
print('ERROR!!! You need to run this via tools/test-tools.')
sys.exit(1)
class ParserTestHappyPath(unittest.TestCase):
def test_basic_parse(self):
# type: () -> None
my_selector = 'li.foo'
my_block = '''{
color: red;
}'''
my_css = my_selector + ' ' + my_block
res = parse(my_css)
self.assertEqual(res.text(), my_css)
section = cast(CssSection, res.sections[0])
block = section.declaration_block
self.assertEqual(block.text().strip(), my_block)
declaration = block.declarations[0]
self.assertEqual(declaration.css_property, 'color')
self.assertEqual(declaration.css_value.text().strip(), 'red')
def test_same_line_comment(self):
# type: () -> None
my_css = '''
li.hide {
display: none; /* comment here */
/* Not to be confused
with this comment */
color: green;
}'''
res = parse(my_css)
section = cast(CssSection, res.sections[0])
block = section.declaration_block
declaration = block.declarations[0]
self.assertIn('/* comment here */', declaration.text())
def test_multi_line_selector(self):
# type: () -> None
my_css = '''
h1,
h2,
h3 {
top: 0
}'''
res = parse(my_css)
section = res.sections[0]
selectors = section.selector_list.selectors
self.assertEqual(len(selectors), 3)
def test_comment_at_end(self):
# type: () -> None
'''
This test verifies the current behavior, which is to
attach comments to the preceding rule, but we should
        probably change it so the comments get attached to
the next block, if possible.
'''
my_css = '''
p {
color: black;
}
/* comment at the end of the text */
'''
res = parse(my_css)
self.assertEqual(len(res.sections), 1)
section = res.sections[0]
self.assertIn('comment at the end', section.post_fluff)
def test_media_block(self):
# type: () -> None
my_css = '''
@media (max-width: 300px) {
h5 {
margin: 0;
}
}'''
res = parse(my_css)
self.assertEqual(len(res.sections), 1)
self.assertEqual(res.text(), my_css)
class ParserTestSadPath(unittest.TestCase):
'''
Use this class for tests that verify the parser will
appropriately choke on malformed CSS.
We prevent some things that are technically legal
in CSS, like having comments in the middle of list
of selectors. Some of this is just for expediency;
some of this is to enforce consistent formatting.
'''
def _assert_error(self, my_css, error):
# See https://github.com/python/typeshed/issues/372
        # for why we have to ignore types here.
with self.assertRaisesRegexp(CssParserException, error): # type: ignore
parse(my_css)
def test_unexpected_end_brace(self):
# type: () -> None
my_css = '''
@media (max-width: 975px) {
body {
color: red;
}
}} /* whoops */'''
error = 'unexpected }'
self._assert_error(my_css, error)
def test_empty_section(self):
# type: () -> None
my_css = '''
/* nothing to see here, move along */
'''
error = 'unexpected empty section'
self._assert_error(my_css, error)
def test_missing_colon(self):
# type: () -> None
my_css = '''
.hide
{
display none /* no colon here */
}'''
error = 'We expect a colon here'
self._assert_error(my_css, error)
def test_unclosed_comment(self):
# type: () -> None
my_css = ''' /* comment with no end'''
error = 'unclosed comment'
self._assert_error(my_css, error)
def test_missing_selectors(self):
# type: () -> None
my_css = '''
/* no selectors here */
{
bottom: 0
}'''
error = 'Missing selector'
self._assert_error(my_css, error)
def test_disallow_comments_in_selectors(self):
# type: () -> None
my_css = '''
h1,
h2, /* comment here not allowed by Zulip */
h3 {
top: 0
}'''
error = 'Comments in selector section are not allowed'
self._assert_error(my_css, error)
|
the-stack_0_18667 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 8 16:20:49 2017
@author: mikewoodward
This should probably be broken up into seperate objects, one for each
tab. The connect and discionnect buttions are similar, so they should
all probably map to the same functions.
"""
# =============================================================================
# Imports
# =============================================================================
from bokeh.client import push_session
from bokeh.document import Document
from bokeh.layouts import column, layout, row, widgetbox
from bokeh.models import ColumnDataSource, LinearAxis, Range1d
from bokeh.models.widgets import (Button, CheckboxGroup, Div,
Panel, Select,
Tabs, TextInput)
from bokeh.plotting import figure
import datetime
import glob
from math import ceil
import os.path
import pandas as pd
import sys
sys.path.insert(0, r'..')
from UT330.UT330 import UT330
# =============================================================================
# class Display
# =============================================================================
class Display(object):
def __init__(self):
self.page_width = 1200 # The width of the display in the browser
self.page_height = 620 # The height of the display
# The temperature and humidity data used by default and if the
# user selects none in the read file menu
self.default_data = \
pd.DataFrame.from_dict({
'Timestamp': ['2018-07-01 08:00:00', '2018-07-01 08:00:01',
'2018-07-02 08:00:00', '2018-07-03 08:00:00',
'2018-07-03 08:00:01', '2018-07-04 08:00:00',
'2018-07-04 08:00:01', '2018-07-04 08:00:02',
'2018-07-05 08:00:00', '2018-07-06 08:00:00',
'2018-07-06 08:00:01'],
'Temperature (C)': [0, 20.0, 15.0, 20.0, 10.0, 10.0, 20.0,
10.0, 15.0, 10.0, 40.0],
'Relative humidity (%)': [0.0, 36.0, 31.0, 36.0, 26.0, 26.0,
36.0, 26.0, 31.0, 25.0, 40.0],
'Pressure (Pa)': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]})
self.default_data['Timestamp'] = \
pd.to_datetime(self.default_data['Timestamp'])
# We start with the default data
self.data = self.default_data
self.source = ColumnDataSource(data=self.data)
# UT330 device
self.ut330 = UT330()
# Whether the UT330 device is connected or not.
self.device_connected = False
# Text used in dropdown menu if no file selected
self.data_file_none = "No data file selected"
# The Bokeh display is tabbed
self.tabs = Tabs(tabs=[self.intro_tab(),
self.read_file_tab(),
self.h_t_tab(),
self.config_tab(),
self.offset_tab(),
self.time_tab(),
self.device_data_tab()])
# Intro tab
# =========
def intro_tab(self):
"""Intro tab - explains the application"""
html = ("<h1>UT330 UI</h1>"
"<p>"
"This UI controls a UT330 device from any operating "
"system. It displays the temperature and humidity from "
"pre-existing data files downloaded from a UT330 and it "
"enables new data files to be read from a UT330 device "
"connected to the computer. For full details of how the "
"software works (and for licensing), read the "
"<a href='https://github.com/MikeWoodward/UT330B'>"
"Github page</a>."
"</p>"
"<p>"
"Mike Woodward, 2017"
"</p>")
intro_text = Div(text=html,
width=self.page_width,
height=self.page_height)
return Panel(child=widgetbox(intro_text), title="UT330 UI")
# Read tab
# ========
def file_changed(self, attrname, old, new):
"""Helper functions for read_tab - called when user selects new
data file to display """
if new == self.data_file_none:
self.data = self.default_data
else:
self.data = pd.read_csv(new, parse_dates=['Timestamp'])
self.h_t_update()
def scan_folder(self):
"""Helper function for scanning the data files folder"""
pattern = os.path.join('Data', 'UT330_data_*.csv')
files = glob.glob(pattern)
length = len(files)
if 0 == length:
status_text = ("<strong>Error!</strong> There are no data "
"files in the Data folder. ")
else:
status_text = ("There are {0} file(s) in the "
"'Data' folder. ").format(length)
status_text += ("Click <strong>Rescan folder</strong> to rescan the "
"data folder.")
# Ensure we have a 'None' option and that it's the default
files.insert(0, self.data_file_none)
# Update the control
self.file_select.options = files
self.file_select.value = files[0]
self.file_status.text = status_text
def read_file_tab(self):
"""Lets the user choose a data file to read"""
# Drop down list
self.file_select = Select(name='Data files',
value='',
options=[],
title='Data files')
# Status text
self.file_status = Div(text='', width=self.page_width)
# Update the file_select and file_status controls with scan data
self.scan_folder()
# This line is here deliberately. The scan_folder would trigger
# the on-change function and we don't want that first time around.
self.file_select.on_change('value', self.file_changed)
# Re-scan button
file_rescan = Button(label="Rescan folder", button_type="success")
file_rescan.on_click(self.scan_folder)
# Layout
c = column(self.file_select,
self.file_status,
file_rescan)
return Panel(child=c, title="Read from file")
# Config tab
# ==========
def config_read(self):
"""Reads config data to disk"""
if not self.device_connected:
self.config_status.text = ("Cannot read the UT330 device "
"config data "
"because no UT330 device connected.")
return
# Get the config data
if self.config_connected():
# The device has been read OK
self.config_device_read = True
else:
self.config_device_read = False
def config_write(self):
"""Writes config data to disk"""
# Some error checking
if not self.device_connected:
self.config_status.text = ("Cannot write the UT330 config data "
"to disk "
"because there is no UT330 device "
"connected.")
return
if not self.config_device_read:
self.config_status.text = ("You must read the UT330 configuration "
"before before "
"writng different configuration data.")
return
try:
# Get the config data
config = {'device name': self.config_device_name.value,
'sampling interval': int(self.config_sampling.value),
'overwrite records':
self.config_overwrite_records.value == 'True',
'delay timing': int(self.config_delay.value),
'delay start': self.config_delay_start.value == 'Delay',
'high temperature alarm': int(self.config_t_high.value),
'low temperature alarm': int(self.config_t_low.value),
'high humidity alarm': int(self.config_h_high.value),
'low humidity alarm': int(self.config_h_low.value)}
# Write it
self.ut330.write_config(config)
self.config_status.text = ("Wrote configuration data to UT3330 "
"device.")
except ValueError as error:
self.config_status.text = error.args[0]
except:
self.config_status.text = "Error in config_write function."
def config_connect(self):
"""Attempts to connect to device"""
# Look to see if the device already connected
if self.device_connected:
self.config_status.text = ("Cannot connect the UT330 device "
"because the UT330 device is already "
"connected.")
return
# Now try and connect
try:
self.ut330.connect()
self.config_status.text = ("Connected to the UT330 device.")
self.device_connected = True
return
except IOError as error:
self.config_status.text = error.args[0]
self.device_connected = False
return
def config_disconnect(self):
"""Attempts to disconnect from device"""
if not self.device_connected:
self.config_status.text = ("Cannot disconnect the UT330 device "
"because no UT330 device connected.")
return
# Now try and disconnect
try:
self.ut330.disconnect()
self.config_status.text = "Disconnected the UT330 device."
self.config_device_read = False
self.device_connected = False
return
except IOError as error:
self.config_status.text = error.args[0]
return
def config_not_connected(self):
"""UT330 not connected - so update config controls appropriately"""
self.config_status.text = "UT330 device not connected."
self.config_device_name.value = "No device"
self.config_device_time.value = "No device"
self.config_computer_time.value = "No device"
self.config_t_high.value = "No device"
self.config_t_low.value = "No device"
self.config_h_high.value = "No device"
self.config_h_low.value = "No device"
self.config_p_high.value = "No device"
self.config_p_low.value = "No device"
self.config_sampling.value = "No device"
self.config_delay.value = "No device"
self.config_power.value = "No device"
self.config_readings.value = "No device"
def config_connected(self):
"""UT330 connected - so update config controls appropriately"""
try:
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
config = self.ut330.read_config()
self.config_status.text = "UT330 device connected."
# Populate the widgets
self.config_device_name.value = config['device name']
self.config_device_time.value = config['timestamp'].strftime(
"%Y-%m-%d %H:%M:%S")
self.config_computer_time.value = now
self.config_t_high.value = str(config['high temperature alarm'])
self.config_t_low.value = str(config['low temperature alarm'])
self.config_h_high.value = str(config['high humidity alarm'])
self.config_h_low.value = str(config['low humidity alarm'])
self.config_p_high.value = 'None'
self.config_p_low.value = 'None'
self.config_sampling.value = str(config['sampling interval'])
self.config_overwrite_records.value = \
'True' if config['overwrite records'] else 'False'
self.config_delay_start.value = \
'Delay' if config['delay start'] else 'No delay'
self.config_delay.value = str(config['delay timing'])
self.config_power.value = str(config['battery power'])
self.config_readings.value = \
"{0} of {1}".format(config['readings count'],
config['readings limit'])
return True
except:
self.config_status.text = "Exception raised in config_connected."
return False
def config_tab(self):
"""Reading/writing device configuration"""
# True if the config device data has been read, false otherwise
self.config_device_read = False
# Device connectivity
# ===================
config_conn_head = Div(text="<strong>Connectivity</strong>")
self.config_status = Div(text="", width=self.page_width)
config_connect = Button(label='Connect to UT330',
button_type="success")
config_read = Button(label='Read config', button_type="success")
config_write = Button(label='Write config', button_type="success")
config_disconnect = Button(label='Disconnect from UT330',
button_type="success")
config_connect.on_click(self.config_connect)
config_read.on_click(self.config_read)
config_write.on_click(self.config_write)
config_disconnect.on_click(self.config_disconnect)
# Show the configuration data
# ===========================
# Set up the widgets
config_device_head = Div(text="<strong>Configuration</strong>")
self.config_device_name = TextInput(title="Device name")
self.config_device_time = TextInput(title="Device time")
self.config_computer_time = TextInput(title="Computer time")
self.config_t_high = TextInput(title="High temperature alarm (C)")
self.config_t_low = TextInput(title="Low temperature alarm (C)")
self.config_h_high = TextInput(title="High humidity alarm (%RH)")
self.config_h_low = TextInput(title="Low humidity alarm (%RH)")
self.config_p_high = TextInput(title="High pressure alarm")
self.config_p_low = TextInput(title="Low pressure alarm")
self.config_sampling = TextInput(title="Sampling interval (s)")
self.config_overwrite_records = Select(title="Overwrite records",
options=['False', 'True'])
self.config_delay_start = Select(title="Delay start",
options=['No delay', 'Delay'])
self.config_delay = TextInput(title="Delay (s)")
# Status data
# ===========
config_status_head = Div(text="<strong>Status</strong>")
self.config_power = TextInput(title="Battery power (%)")
self.config_readings = TextInput(title="Readings")
# Disable user input for these widgets
self.config_power.disabled = True
self.config_readings.disabled = True
# Values to widgets
# =================
if self.device_connected:
self.config_connected()
else:
self.config_not_connected()
# Set up the display
layout = column(row(config_conn_head),
row(self.config_status),
row(config_connect,
config_read,
config_write,
config_disconnect),
row(config_device_head),
row(self.config_device_name,
self.config_device_time,
self.config_computer_time),
row(self.config_t_low,
self.config_h_low,
self.config_p_low),
row(self.config_t_high,
self.config_h_high,
self.config_p_high),
row(self.config_sampling),
row(self.config_overwrite_records,
self.config_delay_start,
self.config_delay),
row(config_status_head),
row(self.config_power, self.config_readings))
return Panel(child=layout,
title="Read/write configuration")
# Offset tab
# ==========
def offset_connect(self):
"""Connect the UT330 device"""
# Look to see if the device already connected
if self.device_connected:
self.offset_status.text = ("Cannot connect the UT330 device "
"because the UT330 device is already "
"connected.")
return
# Now try and connect
try:
self.ut330.connect()
self.offset_status.text = ("Connected to the UT330 device.")
self.device_connected = True
except IOError as error:
self.offset_status.text = error.args[0]
self.device_connected = False
except:
self.offset_status.text = "Exception raised in offset_connect."
self.device_connected = False
def offset_disconnect(self):
"""Disconnect the UT330 device"""
if not self.device_connected:
self.offset_status.text = ("Cannot disconnect the UT330 device "
"because no UT330 device connected.")
return
# Now try and disconnect
try:
self.ut330.disconnect()
self.offset_status.text = "Disconnected the UT330 device."
self.offset_device_read = False
self.device_connected = False
except IOError as error:
self.offset_status.text = error.args[0]
except:
self.offset_status.text = "Exception raised in offset_disconnect."
def offset_read(self):
"""Reads offset data to disk"""
if not self.device_connected:
self.offset_status.text = ("Cannot read the UT330 device "
"offset data "
"because no UT330 device connected.")
return
# Get the config data
if self.offset_connected():
# The device has been read OK
self.offset_device_read = True
else:
self.offset_device_read = False
def offset_write(self):
"""Writes offset data to disk"""
if not self.device_connected:
self.offset_status.text = ("Cannot write the UT330 offset data "
"to disk "
"because there is no UT330 device "
"connected.")
return
if not self.offset_device_read:
self.offset_status.text = ("You must read the UT330 offset "
"before before "
"writng different offset data.")
return
try:
# Get the offset data
offset = {'temperature offset': float(self.offset_t.value),
'humidity offset': float(self.offset_h.value),
'pressure offset': float(self.offset_p.value)}
# Write it
self.ut330.write_offsets(offset)
self.offset_status.text = ("Wrote offset data to UT3330 "
"device.")
except ValueError as error:
self.offset_status.text = error.args[0]
except:
self.offset_status.text = "Exception in offset_write function."
def offset_not_connected(self):
"""UT330 not connected - so update offset controls appropriately"""
self.config_status.text = "UT330 device not connected."
self.offset_t_current.value = "No device"
self.offset_h_current.value = "No device"
self.offset_p_current.value = "No device"
self.offset_t.value = "No device"
self.offset_h.value = "No device"
self.offset_p.value = "No device"
def offset_connected(self):
"""UT330 connected - so update offset controls appropriately"""
try:
self.config_status.text = ("UT330 device connected and offsets "
"read.")
offsets = self.ut330.read_offsets()
self.offset_t_current.value = str(offsets['temperature'])
self.offset_h_current.value = str(offsets['humidity'])
self.offset_p_current.value = str(offsets['pressure'])
self.offset_t.value = str(offsets['temperature offset'])
self.offset_h.value = str(offsets['humidity offset'])
self.offset_p.value = str(offsets['pressure offset'])
return True
except:
self.config_status.text = "UT330 device not connected."
return False
def offset_tab(self):
"""Reading/writing device offsets"""
# True if the offset device data has been read, false otherwise
self.offset_device_read = False
offset_status_h = Div(text="<strong>Status</strong>")
self.offset_status = Div(text="", width=self.page_width)
# Connect to device button
# ========================
offset_controls_h = Div(text="<strong>Device controls</strong>")
offset_connect = Button(label='Connect to UT330',
button_type="success")
offset_read = Button(label='Read offset', button_type="success")
offset_write = Button(label='Write offset', button_type="success")
offset_disconnect = Button(label='Disconnect from UT330',
button_type="success")
offset_connect.on_click(self.offset_connect)
offset_read.on_click(self.offset_read)
offset_write.on_click(self.offset_write)
offset_disconnect.on_click(self.offset_disconnect)
# Offsets
# =======
offset_offsets_h = Div(text="<strong>Offsets</strong>")
self.offset_t_current = TextInput(title="Temperature current")
self.offset_h_current = TextInput(title="Humidity current")
self.offset_p_current = TextInput(title="Pressure current")
self.offset_t = TextInput(title="Temperature offset")
self.offset_h = TextInput(title="Humidity offset")
self.offset_p = TextInput(title="Pressure offset")
# Values to widgets
# =================
if self.device_connected:
self.offset_connected()
else:
self.offset_not_connected()
if self.device_connected:
self.offset_status.text = ('UT330 device connected. The Read, '
'Write, and Disconnect buttons '
'will work.')
else:
self.offset_status.text = ('UT330 device is <strong>NOT</strong> '
'connected. The '
'Read, Write, and Disconnect buttons '
'will <strong>not work</strong>. '
'Click the '
'Connect button if the UT330 is '
'connected on a USB port.')
# Layout
# ======
l = layout([[offset_status_h],
[self.offset_status],
[offset_controls_h],
[offset_connect,
offset_read,
offset_write,
offset_disconnect],
[offset_offsets_h],
[self.offset_t_current,
self.offset_h_current,
self.offset_p_current],
[self.offset_t,
self.offset_h,
self.offset_p]],
width=self.page_width)
return Panel(child=l,
title="Read/write offset")
# Data tab
# ========
def data_connect(self):
"""Connects to the device"""
if self.device_connected:
self.data_status.text = ("Cannot connect the UT330 device "
"because UT330 device already connected.")
return
# Now try and connect
try:
self.ut330.connect()
self.data_status.text = "Connected to the UT330 device."
self.device_connected = True
except IOError as error:
self.data_status.text = error.args[0]
self.device_connected = False
except:
self.data_status.text = "Exception raised in data_connect."
self.device_connected = False
def data_disconnect(self):
"""Disconnects from the device"""
if not self.device_connected:
self.data_status.text = ("Cannot disconnect the UT330 device "
"because no UT330 device connected.")
return
# Now try and disconnect
try:
self.ut330.disconnect()
self.data_status.text = "Disconnected the UT330 device."
self.device_connected = False
except IOError as error:
self.data_status.text = error.args[0]
except:
self.data_status.text = "Exception raised in data_disconnect."
def data_read(self):
"""Reads data from device"""
if not self.device_connected:
self.data_status.text = ("Cannot read the UT330 device "
"because no UT330 device connected.")
return
try:
self.data_status.text = "Reading in data from UT330..."
data = self.ut330.read_data()
count = len(data)
if 0 == count:
self.data_status.text = "No data to read on device."
return
self.data = pd.DataFrame(data)
self.h_t_update()
self.data_status.text = \
"{0} lines of data read in from UT330.".format(count)
except:
self.data_status.text = "Exception in data_read."
def data_write(self):
"""Writes data to disk"""
if not self.device_connected:
self.data_status.text = ("Cannot write the UT330 device data "
"to disk "
"because there is no UT330 device "
"connected.")
return
try:
data = self.ut330.read_data()
self.data = pd.DataFrame(data)
self.h_t_update()
timestamp = data[0]['Timestamp']
data_file = \
os.path.join('Data',
'UT330_data_{0}.csv'.
format(timestamp.strftime("%Y%m%d_%H%M%S")))
self.data.to_csv(data_file)
self.data_status.text = "Wrote data to file {0}.".format(data_file)
except:
self.data_status.text = "Exception in data_write."
def data_erase(self):
"""Erases device data"""
if not self.device_connected:
self.data_status.text = ("Cannot erase the UT330 device data "
"because no UT330 device connected.")
return
try:
self.ut330.delete_data()
self.data_status.text = "UT330 data erased."
except:
self.data_status.text = "Exception in data_erase."
def device_data_tab(self):
"""Reading device data"""
self.data_status = Div(text="", width=self.page_width)
data_connect = Button(label='Connect to UT330',
button_type="success")
data_read = Button(label='Read data',
button_type="success")
data_write = Button(label='Write data to disk',
button_type="success")
data_erase = Button(label='Erase data',
button_type="success")
data_disconnect = Button(label='Disconnect from UT330',
button_type="success")
data_connect.on_click(self.data_connect)
data_read.on_click(self.data_read)
data_write.on_click(self.data_write)
data_erase.on_click(self.data_erase)
data_disconnect.on_click(self.data_disconnect)
if self.device_connected:
self.data_status.text = ('UT330 device connected. The Read, '
'Write, Erase, and Disconnect buttons '
'will work.')
else:
self.data_status.text = ('UT330 device is <strong>NOT</strong> '
'connected. The '
'Read, Write, Erase, and Disconnect '
'buttons will <strong>not work</strong>. '
'Press the '
'Connect button if the UT330 is '
'connected on a USB port.')
# Layout
l = layout([[self.data_status],
[data_connect, data_disconnect],
[data_read, data_write, data_erase]],
width=self.page_width)
return Panel(child=l,
title="Read from device")
# Humidity and temperature
# ========================
# Helper function to update the Humidity and Temperature chart
def h_t_update(self):
"""Updates Humidity/Temperature chart"""
self.source.data = {'Timestamp': self.data['Timestamp'],
'Temperature (C)': self.data['Temperature (C)'],
'Relative humidity (%)':
self.data['Relative humidity (%)'],
'Pressure (Pa)': self.data['Pressure (Pa)']}
# Reset the y axis ranges for temperature and humidity
ymin = round(self.data['Temperature (C)'].min() - 2)
ymax = ceil(self.data['Temperature (C)'].max() + 2)
self.h_t_fig.y_range.start = ymin
self.h_t_fig.y_range.end = ymax
ymin = round(self.data['Relative humidity (%)'].min() - 2)
ymax = ceil(self.data['Relative humidity (%)'].max() + 2)
self.h_t_fig.extra_y_ranges['humidity'].start = ymin
self.h_t_fig.extra_y_ranges['humidity'].end = ymax
def h_t_lines_changed(self, active):
"""Helper function for h_t_tab - turns lines on and off"""
for index in range(len(self.h_t_line)):
self.h_t_line[index].visible = index in active
def h_t_tab(self):
"""Plots the humidity and temperature"""
self.h_t_fig = figure(plot_width=int(self.page_width*0.9),
plot_height=self.page_height,
title="Temperature and humidity",
toolbar_location="above",
x_axis_type="datetime")
self.h_t_fig.xaxis.axis_label = "Timestamp"
self.h_t_fig.yaxis.axis_label = "Temperature (C)"
# Ranges need to be defined here - causes update issues if this
# doesn't happen here
self.h_t_fig.y_range = Range1d(start=0, end=100)
self.h_t_fig.extra_y_ranges = {'humidity': Range1d(start=0,
end=100)}
self.h_t_fig.add_layout(LinearAxis(y_range_name='humidity',
axis_label="Relative humidity (%)"),
'right')
# Add the lines
self.h_t_line = 2*[None]
        # Plot the temperature and humidity lines
self.h_t_line[0] = self.h_t_fig.line(x='Timestamp',
y='Temperature (C)',
source=self.source,
color="blue",
legend="Temperature",
line_width=2)
self.h_t_line[1] = self.h_t_fig.line(x="Timestamp",
y="Relative humidity (%)",
source=self.source,
y_range_name="humidity",
color="green",
legend="Humidity",
line_width=2)
# Update the data and the plot ranges
self.h_t_update()
# Checkboxes to show lines
resp_b = [0, 1]
h_t_check_head = Div(text="Responses")
h_t_check = CheckboxGroup(labels=["Temperature", "Humidity"],
active=resp_b,
name="Lines")
h_t_check.on_click(self.h_t_lines_changed)
# Lay out the page
w = widgetbox(h_t_check_head,
h_t_check,
width=int(self.page_width*0.1))
l = row(w, self.h_t_fig)
return Panel(child=l, title="Temperature and humidity")
# Time tab
# =======
def time_connect(self):
"""Connects to the device"""
if self.device_connected:
self.time_status.text = ("Cannot connect the UT330 device "
"because UT330 device already connected.")
return
# Now try and connect
try:
self.ut330.connect()
self.time_status.text = "Connected to the UT330 device."
self.device_connected = True
except IOError as error:
self.time_status.text = error.args[0]
self.device_connected = False
except:
self.time_status.text = "Exception raised in data_connect."
self.device_connected = False
def time_disconnect(self):
"""Disconnects from the device"""
if not self.device_connected:
self.time_status.text = ("Cannot disconnect the UT330 device "
"because no UT330 device connected.")
return
# Now try and disconnect
try:
self.ut330.disconnect()
self.time_status.text = "Disconnected the UT330 device."
self.device_connected = False
except IOError as error:
self.time_status.text = error.args[0]
except:
self.time_status.text = "Exception raised in data_disconnect."
def time_get(self):
"""Gets the time on the device."""
if not self.device_connected:
self.time_status.text = ("Cannot get time from the UT330 device "
"because no UT330 device connected.")
return
try:
before = datetime.datetime.now()
config = self.ut330.read_config()
device = config['timestamp']
after = datetime.datetime.now()
self.time_compare.text = "Date/time on computer before "\
"device call: {0}<br>"\
"Date/time from device: {1}<br>" \
"Date/time on computer after "\
"device call: {2}".format(before,
device,
after)
self.time_status.text = "Got the UT330 date and time."
except:
self.time_status.text = "Exception in time_get."
def time_set(self):
"""Sets the time on the device."""
if not self.device_connected:
self.time_status.text = ("Cannot set time from the UT330 device "
"because no UT330 device connected.")
return
try:
now = datetime.datetime.now()
self.ut330.write_date_time(now)
self.time_status.text = ("Set the UT330 date and time from the "
"computer.")
except:
self.time_status.text = "Exception in time_set."
def time_tab(self):
"""The date and time setting and getting tab"""
self.time_status = Div(text="", width=self.page_width)
time_connect = Button(label='Connect to UT330',
button_type="success")
time_disconnect = Button(label='Disconnect from UT330',
button_type="success")
time_get = Button(label='Get UT330 date and time',
button_type="success")
self.time_compare = Div(text="", width=self.page_width)
time_set = Button(label='Set the UT330 date and time',
button_type="success")
time_connect.on_click(self.time_connect)
time_disconnect.on_click(self.time_disconnect)
time_get.on_click(self.time_get)
time_set.on_click(self.time_set)
l = layout([self.time_status],
[time_connect, time_disconnect],
[time_get, self.time_compare],
[time_set])
return Panel(child=l, title="Date and time setting")
def go(self):
"""Displays the application"""
document = Document()
document.title = "UT330 UI"
document.add_root(self.tabs)
session = push_session(document)
session.show()
session.loop_until_closed()
# =============================================================================
# Main
# =============================================================================
if __name__ == '__main__':
display = Display()
display.go()
|
the-stack_0_18668 | import sys
import json
def read_log(log):
"""
reads a log file and returns lists of epoch, training loss, learning rate
"""
epochs = []
losses = []
lrs = []
t_perps = []
vlosses = []
v_perps= []
with open(log, 'r') as f:
lines = f.readlines()
for line in lines[1:]:
epoch, loss, lr, t_perp, vloss, v_perp, _ = line.split('\t')
epochs.append(float(epoch))
losses.append(float(loss))
lrs.append(float(lr))
vlosses.append(float(vloss))
v_perps.append(float(v_perp))
t_perps.append(float(t_perp))
return epochs, losses, lrs, t_perps, vlosses, v_perps
epochs = []
losses = []
lrs = []
t_perps = []
vlosses = []
v_perps= []
e_c = 0
for i, log_file in enumerate(sys.argv[2:]):
e, lo, lr, t_p, vlo, v_p = read_log(log_file)
t_px = []
for t in t_p:
t_px.append(t)
epochs.extend([n + (e_c) for n in e])
e_c += len(e)
losses.extend(lo)
vlosses.extend(vlo)
lrs.extend(lr)
t_perps.extend(t_px)
v_perps.extend(v_p)
i = 0
lr = lrs[0]
result = {lr: [list(),list(),list(),list(), list()]}
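# Group the merged history by learning-rate segment: each key is a learning
# rate, mapping to [epochs, train losses, val losses, train perplexities,
# val perplexities] for the contiguous run of epochs trained at that rate.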
while i < len(epochs):
if lrs[i] == lr:
result[lr][0].append(epochs[i])
result[lr][1].append(losses[i])
result[lr][2].append(vlosses[i])
result[lr][3].append(t_perps[i])
result[lr][4].append(v_perps[i])
i = i + 1
else:
lr = lrs[i]
result[lr] = [list(), list(),list(),list(),list()]
with open(sys.argv[1], 'w') as f:
f.write(json.dumps(result))
|
the-stack_0_18669 | import io
import os
import re
from django.conf import settings
from django.utils.http import urlencode
from six.moves.urllib.parse import urljoin
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.utils import conf
# valid storage filenames consist of 32-char hex plus a file extension
VALID_STORAGE_FILENAME = re.compile(r"[0-9a-f]{32}(-data)?\.[0-9a-z]+")
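# e.g. "0123456789abcdef0123456789abcdef.mp4" and
# "0123456789abcdef0123456789abcdef-data.json" both match this pattern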
# set of file extensions that should be considered zip files and allow access to internal files
POSSIBLE_ZIPPED_FILE_EXTENSIONS = set([".zip", ".h5p"])
def _maybe_makedirs(path):
if not os.path.isdir(path):
try:
os.makedirs(path)
# When importing from USB etc, it does not need to create
# directories under external drives that are not writable.
except OSError:
pass
def get_attribute(obj, key):
"""
Get an attribute from an object, regardless of whether it is a dict or an object
"""
if not isinstance(obj, dict):
return getattr(obj, key)
return obj[key]
def get_content_file_name(obj):
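    # e.g. an object/dict with id "0123456789abcdef0123456789abcdef" and
    # extension "mp4" yields "0123456789abcdef0123456789abcdef.mp4"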
return "{checksum}.{extension}".format(
checksum=get_attribute(obj, "id"), extension=get_attribute(obj, "extension")
)
def get_local_content_storage_file_url(obj):
"""
Return a url for the client side to retrieve the content file.
The same url will also be exposed by the file serializer.
"""
if get_attribute(obj, "available"):
return get_content_storage_file_url(filename=get_content_file_name(obj))
else:
return None
# DISK PATHS
def get_content_dir_path(datafolder=None, contentfolder=None):
if contentfolder:
return contentfolder
elif datafolder:
return os.path.join(datafolder, "content")
else:
return conf.OPTIONS["Paths"]["CONTENT_DIR"]
def get_content_fallback_paths():
paths = []
fallback_dirs = conf.OPTIONS["Paths"]["CONTENT_FALLBACK_DIRS"]
for path in fallback_dirs:
path = path.strip()
if not path:
continue
paths.append(path)
return paths
def get_all_content_dir_paths():
return [get_content_dir_path()] + get_content_fallback_paths()
def existing_file_path_in_content_fallback_dirs(subpath):
# see whether the file exists in any of our content fallback directories
for prefix in get_content_fallback_paths():
path = os.path.join(prefix, subpath)
if os.path.exists(path):
return path
# if not, return None
return None
def get_content_database_dir_path(datafolder=None, contentfolder=None):
"""
Returns the path to the content sqlite databases
($HOME/.kolibri/content/databases on POSIX systems, by default)
"""
path = os.path.join(
get_content_dir_path(datafolder=datafolder, contentfolder=contentfolder),
"databases",
)
_maybe_makedirs(path)
return path
def get_content_database_file_path(channel_id, datafolder=None, contentfolder=None):
"""
Given a channel_id, returns the path to the sqlite3 file
($HOME/.kolibri/content/databases/<channel_id>.sqlite3 on POSIX systems, by default)
"""
suffix = "{}.sqlite3".format(channel_id)
primary_path = os.path.join(
get_content_database_dir_path(
datafolder=datafolder, contentfolder=contentfolder
),
suffix,
)
# if the primary path already exists, or the datafolder/contentfolder is overridden, use the primary path
if (
os.path.exists(primary_path)
or datafolder is not None
or contentfolder is not None
):
return primary_path
backup_path = existing_file_path_in_content_fallback_dirs(
os.path.join("databases", suffix)
)
# return backup path if one exists; otherwise, return primary path (even though it doesn't exist yet)
return backup_path or primary_path
def get_upgrade_content_database_file_path(
channel_id, datafolder=None, contentfolder=None
):
return os.path.join(
get_content_database_dir_path(
datafolder=datafolder, contentfolder=contentfolder
),
"{}-upgrade.sqlite3".format(channel_id),
)
def get_annotated_content_database_file_path(
channel_id, datafolder=None, contentfolder=None
):
return os.path.join(
get_content_database_dir_path(
datafolder=datafolder, contentfolder=contentfolder
),
"{}-annotated.sqlite3".format(channel_id),
)
def get_content_storage_dir_path(datafolder=None, contentfolder=None):
path = os.path.join(
get_content_dir_path(datafolder=datafolder, contentfolder=contentfolder),
"storage",
)
_maybe_makedirs(path)
return path
def get_content_storage_file_path(filename, datafolder=None, contentfolder=None):
if not VALID_STORAGE_FILENAME.match(filename):
raise InvalidStorageFilenameError(
"'{}' is not a valid content storage filename".format(filename)
)
suffix = os.path.join(filename[0], filename[1], filename)
primary_path = os.path.join(
get_content_storage_dir_path(
datafolder=datafolder, contentfolder=contentfolder
),
suffix,
)
# if the primary path already exists, or the datapath is overridden, use the primary path
if (
os.path.exists(primary_path)
or datafolder is not None
or contentfolder is not None
):
return primary_path
backup_path = existing_file_path_in_content_fallback_dirs(
os.path.join("storage", suffix)
)
# return backup path if one exists; otherwise, return the primary path (even though it doesn't exist yet)
return backup_path or primary_path
# URL PATHS
def get_content_url(baseurl=None):
return get_content_server_url("content/", baseurl=baseurl)
def get_content_database_url(baseurl=None):
return urljoin(get_content_url(baseurl), "databases/")
def get_content_database_file_url(channel_id, baseurl=None):
return urljoin(get_content_database_url(baseurl), "{}.sqlite3".format(channel_id))
def get_content_storage_url(baseurl=None):
return urljoin(get_content_url(baseurl), "storage/")
def get_content_storage_remote_url(filename, baseurl=None):
return "{}{}/{}/{}".format(
get_content_storage_url(baseurl), filename[0], filename[1], filename
)
def get_content_server_url(path, baseurl=None):
if not baseurl:
baseurl = conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
return urljoin(baseurl, path)
def get_info_url(baseurl=None):
return get_content_server_url("/api/public/info", baseurl=baseurl)
def get_channel_lookup_url(
version="1", identifier=None, baseurl=None, keyword=None, language=None
):
content_server_path = "/api/public/v{}/channels".format(version)
if identifier:
content_server_path += "/lookup/{}".format(identifier)
content_server_path += "?"
query_params = {}
if keyword:
query_params["keyword"] = keyword
if language:
query_params["language"] = language
content_server_path += urlencode(query_params)
return get_content_server_url(content_server_path, baseurl=baseurl)
def get_file_checksums_url(channel_id, baseurl, version="1"):
# This endpoint does not exist on Studio, so a baseurl is required.
return get_content_server_url(
"/api/public/v{version}/file_checksums/{channel_id}".format(
version=version, channel_id=channel_id
),
baseurl=baseurl,
)
HASHI = "hashi/"
ZIPCONTENT = "zipcontent/"
def get_zip_content_base_path():
return "{}{}".format(conf.OPTIONS["Deployment"]["URL_PATH_PREFIX"], ZIPCONTENT)
HASHI_FILENAME = None
def get_hashi_filename():
global HASHI_FILENAME
if HASHI_FILENAME is None or getattr(settings, "DEVELOPER_MODE", None):
with io.open(
os.path.join(os.path.dirname(__file__), "../build/hashi_filename"),
mode="r",
encoding="utf-8",
) as f:
HASHI_FILENAME = f.read().strip()
return HASHI_FILENAME
def get_hashi_base_path():
return "{}{}".format(conf.OPTIONS["Deployment"]["URL_PATH_PREFIX"], HASHI)
def get_hashi_path():
return "{}{}{}".format(
conf.OPTIONS["Deployment"]["URL_PATH_PREFIX"], HASHI, get_hashi_filename()
)
def get_content_storage_file_url(filename):
"""
Return the URL at which the specified file can be accessed. For regular files, this is a link to the static
file itself, under "/content/storage/". For "zip" files, this points to a dynamically generated view that
allows the client-side to index into the files within the zip.
"""
ext = os.path.splitext(filename)[1]
if ext in POSSIBLE_ZIPPED_FILE_EXTENSIONS:
return "{}{}/".format(get_zip_content_base_path(), filename)
else:
return "/{}{}/{}/{}".format(
get_content_storage_url(
conf.OPTIONS["Deployment"]["URL_PATH_PREFIX"]
).lstrip("/"),
filename[0],
filename[1],
filename,
)
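# Illustrative sketch (added for clarity, not part of the original module): how a content
# filename maps to its disk path and URL, assuming a 32-char hex checksum filename.
#
#   fname = "0123456789abcdef0123456789abcdef.mp4"
#   get_content_storage_file_path(fname)
#       -> <CONTENT_DIR>/storage/0/1/0123456789abcdef0123456789abcdef.mp4
#   get_content_storage_file_url(fname)
#       -> "/content/storage/0/1/0123456789abcdef0123456789abcdef.mp4"  (prefix-dependent)
#   get_content_storage_file_url("0123456789abcdef0123456789abcdef.zip")
#       -> "<URL_PATH_PREFIX>zipcontent/0123456789abcdef0123456789abcdef.zip/"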
|
the-stack_0_18670 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Seq2seq layer operations for use in neural networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
import tensorflow as tf
def _transpose_batch_time(x):
"""Transpose the batch and time dimensions of a Tensor.
Retains as much of the static shape information as possible.
Args:
x: A tensor of rank 2 or higher.
Returns:
x transposed along the first two dimensions.
Raises:
ValueError: if `x` is rank 1 or lower.
"""
x_static_shape = x.get_shape()
if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2, but saw shape: %s" %
(x, x_static_shape))
x_rank = array_ops.rank(x)
x_t = array_ops.transpose(
x, array_ops.concat(
([1, 0], math_ops.range(2, x_rank)), axis=0))
x_t.set_shape(
tensor_shape.TensorShape([
x_static_shape[1].value, x_static_shape[0].value
]).concatenate(x_static_shape[2:]))
return x_t
def _create_zero_outputs(size, dtype, batch_size):
"""Create a zero outputs Tensor structure."""
def _t(s):
return (s if isinstance(s, ops.Tensor) else constant_op.constant(
tensor_shape.TensorShape(s).as_list(),
dtype=dtypes.int32,
name="zero_suffix_shape"))
def _create(s, d):
return array_ops.zeros(
array_ops.concat(
([batch_size], _t(s)), axis=0), dtype=d)
return nest.map_structure(_create, size, dtype)
def dynamic_decode(decoder,
output_time_major=False,
impute_finished=False,
maximum_iterations=None,
parallel_iterations=32,
swap_memory=False,
scope=None):
"""Perform dynamic decoding with `decoder`.
Args:
decoder: A `Decoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is faster).
Otherwise, outputs are returned as batch major tensors (this adds extra
time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: `int32` scalar, maximum allowed number of decoding
steps. Default is `None` (decode until the decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
scope: Optional variable scope to use.
Returns:
`(final_outputs, final_state)`.
Raises:
TypeError: if `decoder` is not an instance of `Decoder`.
ValueError: if maximum_iterations is provided but is not a scalar.
"""
if not isinstance(decoder, tf.contrib.seq2seq.Decoder):
raise TypeError("Expected decoder to be type Decoder, but saw: %s" %
type(decoder))
with variable_scope.variable_scope(scope or "decoder") as varscope:
# Properly cache variable values inside the while_loop
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, dtype=dtypes.int32, name="maximum_iterations")
if maximum_iterations.get_shape().ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
initial_finished, initial_inputs, initial_state = decoder.initialize()
zero_outputs = _create_zero_outputs(decoder.output_size,
decoder.output_dtype,
decoder.batch_size)
if maximum_iterations is not None:
initial_finished = math_ops.logical_or(
initial_finished, 0 >= maximum_iterations)
initial_time = constant_op.constant(0, dtype=dtypes.int32)
def _shape(batch_size, from_shape):
if not isinstance(from_shape, tensor_shape.TensorShape):
return tensor_shape.TensorShape(None)
else:
batch_size = tensor_util.constant_value(
ops.convert_to_tensor(
batch_size, name="batch_size"))
return tensor_shape.TensorShape([batch_size]).concatenate(from_shape)
def _create_ta(s, d):
return tensor_array_ops.TensorArray(
dtype=d,
size=0,
dynamic_size=True,
element_shape=_shape(decoder.batch_size, s))
initial_outputs_ta = nest.map_structure(_create_ta, decoder.output_size,
decoder.output_dtype)
def condition(unused_time, unused_outputs_ta, unused_state, unused_inputs,
finished):
return math_ops.logical_not(math_ops.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: 1-D bool tensor.
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished)`.
"""
(next_outputs, decoder_state, next_inputs,
decoder_finished) = decoder.step(time, inputs, state)
next_finished = math_ops.logical_or(decoder_finished, finished)
if maximum_iterations is not None:
next_finished = math_ops.logical_or(
next_finished, time + 1 >= maximum_iterations)
nest.assert_same_structure(state, decoder_state)
nest.assert_same_structure(outputs_ta, next_outputs)
nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
emit = nest.map_structure(
lambda out, zero: array_ops.where(finished, zero, out),
next_outputs,
zero_outputs)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tensor_array_ops.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = (new.shape.ndims == 0)
return new if pass_through else array_ops.where(finished, cur, new)
if impute_finished:
next_state = nest.map_structure(
_maybe_copy_state, decoder_state, state)
else:
next_state = decoder_state
outputs_ta = nest.map_structure(lambda ta, out: ta.write(time, out),
outputs_ta, emit)
return (time + 1, outputs_ta, next_state, next_inputs, next_finished)
res = control_flow_ops.while_loop(
condition,
body,
loop_vars=[
initial_time, initial_outputs_ta, initial_state, initial_inputs,
initial_finished
],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs_ta = res[1]
final_state = res[2]
final_outputs = nest.map_structure(
lambda ta: ta.stack(), final_outputs_ta)
if not output_time_major:
final_outputs = nest.map_structure(
_transpose_batch_time, final_outputs)
return final_outputs, final_state
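# Minimal usage sketch (illustrative only; assumes TF 1.x with tf.contrib.seq2seq
# available, and the tensor names below are hypothetical):
#
#   cell = tf.nn.rnn_cell.LSTMCell(num_units)
#   helper = tf.contrib.seq2seq.TrainingHelper(decoder_inputs, sequence_lengths)
#   decoder = tf.contrib.seq2seq.BasicDecoder(cell, helper, initial_state)
#   outputs, final_state = dynamic_decode(decoder, maximum_iterations=max_len)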
|
the-stack_0_18671 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Exercises the frontend endpoints for the system
"""
import json
import logging
from random import randint, random, choice
import uuid
from locust import HttpLocust, TaskSet, TaskSequence, task, seq_task, between
MASTER_PASSWORD = "password"
TRANSACTION_ACCT_LIST = [str(randint(1111100000, 1111199999)) for _ in range(50)]
def signup_helper(locust, username):
"""
create a new user account in the system
succeeds if token was returned
"""
userdata = {"username":username,
"password":MASTER_PASSWORD,
"password-repeat":MASTER_PASSWORD,
"firstname": username,
"lastname":"TestAccount",
"birthday":"01/01/2000",
"timezone":"82",
"address":"1021 Valley St",
"city":"Seattle",
"state":"WA",
"zip":"98103",
"ssn":"111-22-3333"}
with locust.client.post("/signup", data=userdata, catch_response=True) as response:
found_token = False
for r_hist in response.history:
found_token |= r_hist.cookies.get('token') is not None
if found_token:
response.success()
logging.debug("created user: %s", username)
else:
response.failure("login failed")
return found_token
class AllTasks(TaskSequence):
"""
wrapper for UnauthenticatedTasks and AuthenticatedTasks sets
"""
@seq_task(1)
class UnauthenticatedTasks(TaskSet):
"""
set of tasks to run before obtaining an auth token
"""
@task(5)
def view_login(self):
"""
load the /login page
            fails if already logged on (redirects to /home)
"""
with self.client.get("/login", catch_response=True) as response:
for r_hist in response.history:
if r_hist.status_code > 200 and r_hist.status_code < 400:
response.failure("Got redirect")
@task(5)
def view_signup(self):
"""
load the /signup page
            fails if already logged on (redirects to /home)
"""
with self.client.get("/signup", catch_response=True) as response:
for r_hist in response.history:
if r_hist.status_code > 200 and r_hist.status_code < 400:
response.failure("Got redirect")
@task(1)
def signup(self):
"""
sends POST request to /signup to create a new user
on success, exits UnauthenticatedTasks
"""
# sign up
new_username = str(uuid.uuid4())
success = signup_helper(self, new_username)
if success:
# go to AuthenticatedTasks
self.locust.username = new_username
self.interrupt()
@seq_task(2)
class AuthenticatedTasks(TaskSet):
"""
set of tasks to run after obtaining an auth token
"""
def on_start(self):
"""
on start, deposit a large balance into each account
to ensure all payments are covered
"""
self.deposit(1000000)
@task(10)
def view_index(self):
"""
load the / page
            fails if not logged on (redirects to /login)
"""
with self.client.get("/", catch_response=True) as response:
for r_hist in response.history:
if r_hist.status_code > 200 and r_hist.status_code < 400:
response.failure("Got redirect")
@task(10)
def view_home(self):
"""
load the /home page (identical to /)
            fails if not logged on (redirects to /login)
"""
with self.client.get("/home", catch_response=True) as response:
for r_hist in response.history:
if r_hist.status_code > 200 and r_hist.status_code < 400:
response.failure("Got redirect")
@task(5)
def payment(self, amount=None):
"""
POST to /payment, sending money to other account
"""
if amount is None:
amount = random() * 1000
transaction = {"account_num": choice(TRANSACTION_ACCT_LIST),
"amount": amount,
"uuid": str(uuid.uuid4())}
with self.client.post("/payment",
data=transaction,
catch_response=True) as response:
if response.url is None or "failed" in response.url:
response.failure("payment failed")
@task(5)
def deposit(self, amount=None):
"""
POST to /deposit, depositing external money into account
"""
if amount is None:
amount = random() * 1000
acct_info = {"account_num": choice(TRANSACTION_ACCT_LIST),
"routing_num":"111111111"}
transaction = {"account": json.dumps(acct_info),
"amount": amount,
"uuid": str(uuid.uuid4())}
with self.client.post("/deposit",
data=transaction,
catch_response=True) as response:
                if response.url is None or "failed" in response.url:
response.failure("deposit failed")
@task(5)
def login(self):
"""
sends POST request to /login with stored credentials
succeeds if a token was returned
"""
with self.client.post("/login", {"username":self.locust.username,
"password":MASTER_PASSWORD},
catch_response=True) as response:
found_token = False
for r_hist in response.history:
found_token |= r_hist.cookies.get('token') is not None
if found_token:
response.success()
else:
response.failure("login failed")
@task(1)
def logout(self):
"""
sends a /logout POST request
fails if not logged in
exits AuthenticatedTasks
"""
self.client.post("/logout")
self.locust.username = None
# go to UnauthenticatedTasks
self.interrupt()
class WebsiteUser(HttpLocust):
"""
Locust class to simulate HTTP users
"""
task_set = AllTasks
wait_time = between(1, 1)
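# Run sketch (assumed; the Locust 0.x API used above -- HttpLocust/TaskSet -- is driven
# from the CLI, and the file name/host below are placeholders):
#
#   locust -f loadgenerator.py --host="http://frontend:8080"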
|
the-stack_0_18673 | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.graph.graph import Graph
from mo.ops.op import Op
class MXRepeat(Op):
op = 'MXRepeat'
enabled = True
def __init__(self, graph: Graph, attrs: dict):
assert 'axis' in attrs, 'MXRepeat operation should have `axis` parameter set during creation'
assert 'repeats' in attrs, 'MXRepeat operation should have `repeats` parameter set during creation'
super().__init__(graph, {
'op': self.op,
'type': None,
# operation should be resolved on the front phase, partial inference is not needed
'infer': None,
'in_ports_count': 1,
'out_ports_count': 1,
}, attrs)
|
the-stack_0_18675 | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import textwrap
from subcmds import all_commands
from color import Coloring
from command import PagedCommand, MirrorSafeCommand, GitcAvailableCommand, GitcClientCommand
import gitc_utils
from wrapper import Wrapper
class Help(PagedCommand, MirrorSafeCommand):
COMMON = False
helpSummary = "Display detailed help on a command"
helpUsage = """
%prog [--all|command]
"""
helpDescription = """
Displays detailed usage information about a command.
"""
def _PrintCommands(self, commandNames):
"""Helper to display |commandNames| summaries."""
maxlen = 0
for name in commandNames:
maxlen = max(maxlen, len(name))
fmt = ' %%-%ds %%s' % maxlen
for name in commandNames:
command = all_commands[name]()
try:
summary = command.helpSummary.strip()
except AttributeError:
summary = ''
print(fmt % (name, summary))
def _PrintAllCommands(self):
print('usage: repo COMMAND [ARGS]')
self.PrintAllCommandsBody()
def PrintAllCommandsBody(self):
print('The complete list of recognized repo commands are:')
commandNames = list(sorted(all_commands))
self._PrintCommands(commandNames)
print("See 'repo help <command>' for more information on a "
'specific command.')
print('Bug reports:', Wrapper().BUG_URL)
def _PrintCommonCommands(self):
print('usage: repo COMMAND [ARGS]')
self.PrintCommonCommandsBody()
def PrintCommonCommandsBody(self):
print('The most commonly used repo commands are:')
def gitc_supported(cmd):
if not isinstance(cmd, GitcAvailableCommand) and not isinstance(cmd, GitcClientCommand):
return True
if self.client.isGitcClient:
return True
if isinstance(cmd, GitcClientCommand):
return False
if gitc_utils.get_gitc_manifest_dir():
return True
return False
commandNames = list(sorted([name
for name, command in all_commands.items()
if command.COMMON and gitc_supported(command)]))
self._PrintCommands(commandNames)
print(
"See 'repo help <command>' for more information on a specific command.\n"
"See 'repo help --all' for a complete list of recognized commands.")
print('Bug reports:', Wrapper().BUG_URL)
def _PrintCommandHelp(self, cmd, header_prefix=''):
class _Out(Coloring):
def __init__(self, gc):
Coloring.__init__(self, gc, 'help')
self.heading = self.printer('heading', attr='bold')
self._first = True
def _PrintSection(self, heading, bodyAttr):
try:
body = getattr(cmd, bodyAttr)
except AttributeError:
return
if body == '' or body is None:
return
if not self._first:
self.nl()
self._first = False
self.heading('%s%s', header_prefix, heading)
self.nl()
self.nl()
me = 'repo %s' % cmd.NAME
body = body.strip()
body = body.replace('%prog', me)
# Extract the title, but skip any trailing {#anchors}.
asciidoc_hdr = re.compile(r'^\n?#+ ([^{]+)(\{#.+\})?$')
for para in body.split("\n\n"):
if para.startswith(' '):
self.write('%s', para)
self.nl()
self.nl()
continue
m = asciidoc_hdr.match(para)
if m:
self.heading('%s%s', header_prefix, m.group(1))
self.nl()
self.nl()
continue
lines = textwrap.wrap(para.replace(' ', ' '), width=80,
break_long_words=False, break_on_hyphens=False)
for line in lines:
self.write('%s', line)
self.nl()
self.nl()
out = _Out(self.client.globalConfig)
out._PrintSection('Summary', 'helpSummary')
cmd.OptionParser.print_help()
out._PrintSection('Description', 'helpDescription')
def _PrintAllCommandHelp(self):
for name in sorted(all_commands):
cmd = all_commands[name](manifest=self.manifest)
self._PrintCommandHelp(cmd, header_prefix='[%s] ' % (name,))
def _Options(self, p):
p.add_option('-a', '--all',
dest='show_all', action='store_true',
help='show the complete list of commands')
p.add_option('--help-all',
dest='show_all_help', action='store_true',
help='show the --help of all commands')
def Execute(self, opt, args):
if len(args) == 0:
if opt.show_all_help:
self._PrintAllCommandHelp()
elif opt.show_all:
self._PrintAllCommands()
else:
self._PrintCommonCommands()
elif len(args) == 1:
name = args[0]
try:
cmd = all_commands[name](manifest=self.manifest)
except KeyError:
print("repo: '%s' is not a repo command." % name, file=sys.stderr)
sys.exit(1)
self._PrintCommandHelp(cmd)
else:
self._PrintCommandHelp(self)
|
the-stack_0_18676 | import numpy as np
from epipack import StochasticEpiModel
from tqdm import tqdm
S, I, A, B, C0, C1, D, E, F = "S I A B C0 C1 D E F".split(" ")
rateA = 3.0
rateB = 2.0
rateE = 1.0
probAC0 = 0.2
probAC1 = 0.8
probBD = 0.2
N = 6
edges = [ (0, i, 1.0) for i in range(1,N) ]
model = StochasticEpiModel([S,I,A,B,C0,C1,D, E, F], N, edges)
model.set_node_transition_processes([
(I, rateA, A),
(I, rateB, B),
(I, rateE, E),
])
model.set_conditional_link_transmission_processes({
(I, "->", A) : [
( A, S, probAC0, A, C0),
( A, S, probAC1, A, C1),
],
(I, "->", B): [
( B, S, probBD, B, D),
],
(I, "->", E): [
( E, S, "->", E, F),
],
})
statuses = np.zeros(N,dtype=int)
statuses[0] = 1
model.set_node_statuses(statuses)
print(model.node_transition_events)
counts = np.zeros(model.N_comp,dtype=int)
N_measurements = 20000
for meas in tqdm(range(N_measurements)):
model.set_node_statuses(statuses)
_ = model.simulate(1e9)
for c in range(model.N_comp):
counts[c] += np.count_nonzero(model.node_status == c)
from bfmplot import pl
x = np.arange(model.N_comp)
width = 0.4
pl.bar(x-width/2, counts, width)
expected_counts = np.zeros_like(counts)
expected_counts[model.get_compartment_id(A)] = N_measurements * rateA / (rateB + rateE + rateA)
expected_counts[model.get_compartment_id(B)] = N_measurements * rateB / (rateB + rateE + rateA)
expected_counts[model.get_compartment_id(E)] = N_measurements * rateE / (rateB + rateE + rateA)
expected_counts[model.get_compartment_id(C0)] = N_measurements * ((N-1)*rateA / (rateB + rateE + rateA) * probAC0)
expected_counts[model.get_compartment_id(C1)] = N_measurements * ((N-1)*rateA / (rateB + rateE + rateA) * probAC1)
expected_counts[model.get_compartment_id(D)] = N_measurements * ((N-1)*rateB / (rateB + rateE + rateA) * probBD)
expected_counts[model.get_compartment_id(F)] = (N-1) * expected_counts[model.get_compartment_id(E)]
expected_counts[model.get_compartment_id(S)] = N_measurements * N - expected_counts.sum()
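# Explanatory note (added; not in the original script): node 0 leaves compartment I via
# competing exponential clocks with rates rateA, rateB and rateE, so
# P(I -> A) = rateA / (rateA + rateB + rateE), and likewise for B and E. Each of the
# N-1 susceptible neighbours is then converted with the conditional transmission
# probabilities (probAC0/probAC1 after A, probBD after B, and with certainty after E),
# which yields the (N-1) * P(transition) * prob terms above.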
pl.bar(x+width/2, expected_counts, width)
pl.xticks(x)
pl.gca().set_xticklabels(model.compartments)
pl.figure()
ndx = np.where(expected_counts==0)
counts[ndx] = 1
expected_counts[ndx] = 1
pl.plot(x, np.abs(1-counts/expected_counts))
from scipy.stats import entropy
_counts = np.delete(counts,1)
_exp_counts = np.delete(expected_counts,1)
print(entropy(_counts, _exp_counts))
pl.show()
|
the-stack_0_18677 | import json
import re
import shutil
from collections import defaultdict
from pathlib import Path
import pandas as pd
from pynwb import NWBHDF5IO
from pynwb.ecephys import ElectricalSeries
from tqdm import tqdm
from ando.AnDOChecker import is_valid
from .bidsconverter import BidsConverter
class NwbToBIDS(BidsConverter):
def __init__(self, dataset_path, **kwargs):
super().__init__(dataset_path, **kwargs)
self.datafiles_list = list(self.dataset_path.glob('**/*.nwb'))
assert len(self.datafiles_list) > 0, 'no nwb files found'
self._extract_metadata()
def _extract_metadata(self):
self._participants_dict.update(data=pd.DataFrame(
columns=['species', 'participant_id', 'sex',
'birthdate', 'age', 'genotype', 'weight']))
for file_no, nwb_file in enumerate(tqdm(self.datafiles_list)):
with NWBHDF5IO(str(nwb_file), 'r') as io:
nwbfile = io.read()
# 1) FULL DATASET INFO:
# subject info:
sub_df, subject_label = self._get_subject_info(nwbfile,
subject_suffix=str(file_no))
if not self._participants_dict['data']['participant_id'].str.contains(
subject_label).any():
self._participants_dict['data'].loc[
len(self._participants_dict['data'].index)] = sub_df
# dataset_info:
if self._dataset_desc_json['data'] is None:
self._dataset_desc_json['data'] = self._get_dataset_info(nwbfile)
# 2) SUBJECT SPECIFIC:
# session info:
base_location_1 = Path(f'{subject_label}')
session_default_dict = dict(name=base_location_1 / f'{subject_label}_sessions.tsv',
data=pd.DataFrame(
columns=['session_id', '#_trials', 'comment']))
session_info = self._get_session_info(nwbfile)
sessions_label = session_info[0]
sessions_df = self._sessions_dict.get(subject_label, session_default_dict)['data']
if not sessions_df['session_id'].str.contains(sessions_label).any():
sessions_df.loc[len(sessions_df.index)] = session_info
session_default_dict.update(data=sessions_df)
self._sessions_dict[subject_label] = session_default_dict
# 3) SUBJECT>SESSION SPECIFIC:
base_location_2 = Path(subject_label) / Path(sessions_label) / Path('ephys')
# channels_info:
channel_default_dict = dict(
name=base_location_2 / f'{subject_label}_{sessions_label}_channels.tsv',
data=self._get_channels_info(nwbfile))
self._channels_dict[subject_label].update({sessions_label: channel_default_dict})
# ephys_json:
ephys_default_dict = dict(
name=base_location_2 / f'{subject_label}_{sessions_label}_ephys.json',
data=self._get_ephys_info(nwbfile, **self._kwargs))
self._ephys_dict[subject_label].update({sessions_label: ephys_default_dict})
# contacts/probes info:
contact_df, probes_df = self._get_contacts_info(nwbfile, **self._kwargs)
contacts_default_dict = dict(
name=base_location_2 / f'{subject_label}_{sessions_label}_contacts.tsv',
data=contact_df)
probes_default_dict = dict(
name=base_location_2 / f'{subject_label}_{sessions_label}_probes.tsv',
data=probes_df)
self._contacts_dict[subject_label].update({sessions_label: contacts_default_dict})
self._probes_dict[subject_label].update({sessions_label: probes_default_dict})
# nwbfile location:
nwbfile_default_dict = dict(
name=base_location_2 / f'{subject_label}_{sessions_label}_ephys.nwb',
data=nwb_file)
self._nwbfile_name_dict[subject_label].update(
{sessions_label: nwbfile_default_dict})
@staticmethod
def _get_subject_info(nwbfile, subject_suffix=''):
if nwbfile.subject is not None:
sb = nwbfile.subject
if sb.subject_id is not None:
sub_id = re.sub(r'[\W_]+', '', sb.subject_id)
subject_label = f'sub-{sub_id}'
else:
subject_label = f'sub-{sb.date_of_birth.strftime("%Y%m%dT%H%M")}'
return [sb.species, subject_label, sb.sex[0] if sb.sex is not None else None,
sb.date_of_birth, sb.age, sb.genotype, sb.weight], subject_label
else:
subject_label = f'sub-noname{subject_suffix}'
return [None, subject_label, None, None, None, None, None], subject_label
@staticmethod
def _get_dataset_info(nwbfile):
return dict(
Name='Electrophysiology', BIDSVersion='1.0.X',
Licence='CC BY 4.0',
            Authors=(list(nwbfile.experimenter)
                     if nwbfile.experimenter is not None else None))
@staticmethod
def _get_session_info(nwbfile):
trials_len = len(
nwbfile.trials) if nwbfile.trials is not None else None
if nwbfile.session_id is not None:
ses_id = re.sub(r'[\W_]+', '', nwbfile.session_id)
session_label = f'ses-{ses_id}'
else:
session_label = f'ses-{nwbfile.session_start_time.strftime("%Y%m%dT%H%M")}'
return [session_label, trials_len, nwbfile.session_description]
@staticmethod
def _get_channels_info(nwbfile):
channels_df = pd.DataFrame(
columns=['channel_id', 'contact_id', 'type', 'units', 'sampling_frequency',
'unit_conversion_multiplier'])
        es = [i for i in nwbfile.children if isinstance(i, ElectricalSeries)]
if len(es) > 0:
es = es[0]
no_channels = es.data.shape[1]
sampling_frequency = es.rate
conversion = es.conversion
unit = es.unit
for chan_no in range(no_channels):
channels_df.loc[len(channels_df.index)] = [chan_no, chan_no, 'neural signal',
unit,
sampling_frequency, conversion]
return channels_df
@staticmethod
def _get_ephys_info(nwbfile, **kwargs):
return dict(PowerLineFrequency=kwargs.get('PowerLineFrequency', 50.0),
InstitutionName=nwbfile.institution,
InstitutionalDepartmentName=nwbfile.lab)
@staticmethod
def _get_contacts_info(nwbfile, **kwargs):
contacts_df = pd.DataFrame(
columns=[
'x',
'y',
'z',
'impedance',
'contact_id',
'probe_id',
'location'])
probes_df = pd.DataFrame(columns=['probe_id', 'type'])
e_table = nwbfile.electrodes
if e_table is not None:
for contact_no in range(len(e_table)):
contacts_df.loc[len(contacts_df.index)] = [e_table.x[contact_no],
e_table.y[contact_no],
e_table.z[contact_no],
e_table.imp[contact_no],
contact_no,
e_table.group[contact_no].device.name,
e_table.location[contact_no]]
for probe_id in contacts_df['probe_id'].unique():
probes_df.loc[len(probes_df.index)] = [
probe_id, kwargs.get('probe_type', 'acute')]
return contacts_df, probes_df
def organize(self, output_path=None, move_nwb=False,
re_write=True, validate=True):
if output_path is None:
output_path = self.dataset_path.parent / 'BIDSExt' / self.dataset_path.name
else:
output_path = Path(output_path)
if re_write and output_path.exists():
shutil.rmtree(output_path)
# CREATE FILES:
# 1) data_desc, participants:
output_path.mkdir(parents=True, exist_ok=True)
data, loc = self._parse_data_dict(self._participants_dict, output_path)
data.dropna(axis='columns', how='all', inplace=True)
data.to_csv(loc, sep='\t', index=False)
data, loc = self._parse_data_dict(self._dataset_desc_json, output_path)
with open(loc, 'w') as j:
            if not data['Authors'] or all(au is None for au in data['Authors']):
_ = data.pop('Authors')
dataset_desc_tosave = {k: v for k,
v in data.items() if v is not None}
json.dump(dataset_desc_tosave, j)
# 2) sessions.tsv:
for ses_file_dict in self._sessions_dict.values():
data, loc = self._parse_data_dict(ses_file_dict, output_path)
if not loc.parent.exists():
loc.parent.mkdir(parents=True)
data.to_csv(loc, sep='\t', index=False)
# 3) subject>sessions>ephys specific files:
for subject_id in self._participants_dict['data']['participant_id']:
for session_id in self._sessions_dict[subject_id]['data']['session_id']:
# ephys.json
base_loc = output_path / subject_id / session_id / 'ephys'
if not base_loc.exists():
base_loc.mkdir(parents=True)
data, loc = self._parse_data_dict(
self._ephys_dict[subject_id][session_id],
output_path)
with open(loc, 'w') as j:
json.dump(data, j)
# channels tsv:
data, loc = self._parse_data_dict(
self._channels_dict[subject_id][session_id],
output_path)
self._write_csv(data, loc)
# contacts/probes tsv:
data, loc = self._parse_data_dict(
self._contacts_dict[subject_id][session_id],
output_path)
self._write_csv(data, loc)
data, loc = self._parse_data_dict(
self._probes_dict[subject_id][session_id],
output_path)
self._write_csv(data, loc)
# nwbfile move:
data, loc = self._parse_data_dict(
self._nwbfile_name_dict[subject_id][session_id],
output_path)
if move_nwb:
if not loc.exists():
data.replace(loc)
else:
if not loc.exists():
loc.symlink_to(data)
if validate:
is_valid(output_path)
def _parse_data_dict(self, data_dict, output_path):
return data_dict['data'], output_path / data_dict['name']
def _write_csv(self, data, loc):
if not loc.exists():
data.dropna(axis='columns', how='all', inplace=True)
data.to_csv(loc, sep='\t', index=False)
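# Usage sketch (illustrative; the paths below are placeholders):
#
#   converter = NwbToBIDS('/path/to/nwb_dataset', PowerLineFrequency=50.0)
#   converter.organize(output_path='/path/to/BIDS_output', move_nwb=False, validate=True)
#
# This scans every *.nwb file under the dataset path, builds the participants/sessions/
# channels/contacts/probes tables, and symlinks (or moves) the NWB files into place.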
|
the-stack_0_18679 | import torch
import torchvision
from PIL import Image
from torchvision import transforms
import numpy as np
import glob
from Tiramisu_calibration_Dataset import *
from torch.utils.data import DataLoader
from calibration_models import *
from torch import nn, optim
import os
from tensorboardX import SummaryWriter
import time
import datetime
import os
import sys
import argparse
sys.path.append(os.path.realpath(".."))
sys.path.append(os.path.realpath("../.."))
sys.path.insert(1, '../dirichlet_python')
sys.path.insert(1, '../experiments_neurips')
from scipy import optimize
from sklearn.isotonic import IsotonicRegression
from probability_measure_CamVid import Calculate_ECE, Calculate_MCE, Calculate_SCE, Calculate_ACE
import pickle
from dirichletcal.calib.fulldirichlet import FullDirichletCalibrator
from dirichletcal.calib.fixeddirichlet import FixedDiagonalDirichletCalibrator
from calib.models.dirichlet_keras import Dirichlet_NN
from keras.models import load_model
import random
total_logits_list = glob.glob('/YOUR_PATH_TO_CamVid/results/val/*_logit.pt')
total_logits_list.sort()
total_logits_test_list = glob.glob('/YOUR_PATH_TO_CamVid/results/test/*_logit.pt')
total_logits_test_list.sort()
train_logits_list = total_logits_list[:90]
val_logits_list = total_logits_list[90:]
torch.cuda.manual_seed(0)
TIRAMISU_train = TIRAMISU_CALIBRATION(train_logits_list, 'val')
TIRAMISU_train_dataloader = DataLoader(TIRAMISU_train, batch_size=1, shuffle=True, num_workers=1, pin_memory=False)
TIRAMISU_val = TIRAMISU_CALIBRATION(val_logits_list, 'val')
TIRAMISU_val_dataloader = DataLoader(TIRAMISU_val, batch_size=1, shuffle=False, num_workers=1, pin_memory=False)
TIRAMISU_test = TIRAMISU_CALIBRATION(total_logits_test_list, 'test')
TIRAMISU_test_dataloader = DataLoader(TIRAMISU_test, batch_size=1, shuffle=False, num_workers=1, pin_memory=False)
print("merge individual cases!")
all_probs = None
all_labels = None
for i, (val_image, val_logits, val_labels, val_preds, val_boundary) in enumerate(TIRAMISU_val_dataloader):
test_probs = torch.softmax(val_logits, dim=1).detach().squeeze().cpu().numpy()
val_label_array = val_labels.detach().squeeze().cpu().numpy()
prob_img_array_select = np.transpose(test_probs.reshape((12, -1)))
val_label_array_select = val_label_array.reshape(-1)
# val_label_array_select_onehot = np.eye(12)[val_label_array_select]
if all_probs is None:
all_probs = prob_img_array_select
# all_labels = val_label_array_select_onehot
all_labels = val_label_array_select
else:
all_probs = np.concatenate((all_probs, prob_img_array_select), axis=0)
# all_labels = np.concatenate((all_labels, val_label_array_select_onehot), axis=0)
all_labels = np.concatenate((all_labels, val_label_array_select))
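# At this point all_probs is an (n_pixels, 12) array of softmax probabilities pooled over
# the validation images, and all_labels holds the matching per-pixel class ids; these are
# the inputs to the Dirichlet calibrator fitted below. (Comment added for clarity.)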
print('start training!')
# l2_odir = 1e-2
# dirichlet_calibration = FullDirichletCalibrator(reg_lambda=l2_odir, reg_mu=l2_odir, reg_norm=False)
# dirichlet_calibration = FixedDiagonalDirichletCalibrator()
dirichlet_calibration = Dirichlet_NN(l2=0.001, classes=12, comp=True, max_epochs=20, patience=3, lr=0.0001)
dirichlet_calibration.fit(all_probs, all_labels)
# dirichlet_calibration.save('./calibration/dirichlet_prob.h5')
# dirichlet_calibration.load_model('./calibration/dirichlet_prob.h5')
res_list_Local_ECE = []
res_list_Local_MCE = []
res_list_Local_SCE = []
res_list_Local_ACE = []
## add local patch center
random.seed(10)
patch_len = 36
for ind, (test_image, test_logits, test_labels, test_preds, test_boundary) in enumerate(TIRAMISU_test_dataloader):
print(ind)
image_shape = test_logits.squeeze().shape
test_probs = np.transpose(torch.softmax(test_logits, dim=1).detach().squeeze().cpu().numpy().reshape((12, -1)))
dirichlet_correction = np.transpose(dirichlet_calibration.predict(test_probs)).reshape(image_shape)
gt_img_array = test_labels.squeeze().cpu().numpy()
boundary_img_array = test_boundary.squeeze().cpu().numpy()
prob_img_array = np.max(dirichlet_correction, axis=0)/np.sum(dirichlet_correction, axis=0)
pred_img_array = np.argmax(dirichlet_correction, axis=0)
for lo_ind in range(10):
random_center = (
random.randint(84, pred_img_array.shape[0] - 84), random.randint(84, pred_img_array.shape[1] - 84))
patch_prob_img_array = prob_img_array[random_center[0] - patch_len:random_center[0] + patch_len, random_center[1] - patch_len:random_center[1] + patch_len]
patch_pred_img_array = pred_img_array[random_center[0] - patch_len:random_center[0] + patch_len, random_center[1] - patch_len:random_center[1] + patch_len]
patch_gt_img_array = gt_img_array[random_center[0] - patch_len:random_center[0] + patch_len, random_center[1] - patch_len:random_center[1] + patch_len]
res_list_Local_ECE.append(Calculate_ECE(confidence=patch_prob_img_array, prediction=patch_pred_img_array, gt=patch_gt_img_array, boundary=boundary_img_array, boundary_on=False, n_bins=10))
res_list_Local_MCE.append(Calculate_MCE(confidence=patch_prob_img_array, prediction=patch_pred_img_array, gt=patch_gt_img_array, boundary=boundary_img_array, boundary_on=False, n_bins=10))
res_list_Local_SCE.append(Calculate_SCE(confidence=patch_prob_img_array, prediction=patch_pred_img_array, gt=patch_gt_img_array, boundary=boundary_img_array, boundary_on=False, n_bins=10))
res_list_Local_ACE.append(Calculate_ACE(confidence=patch_prob_img_array, prediction=patch_pred_img_array, gt=patch_gt_img_array, boundary=boundary_img_array, boundary_on=False, n_bins=10))
with open("./CamVid_result/"+"Dirichlet_CamVid_ICCV_Local_36_ECE.txt", "wb") as fp_ECE:
pickle.dump(res_list_Local_ECE, fp_ECE, protocol=2)
with open("./CamVid_result/"+"Dirichlet_CamVid_ICCV_Local_36_MCE.txt", "wb") as fp_MCE:
pickle.dump(res_list_Local_MCE, fp_MCE, protocol=2)
with open("./CamVid_result/"+"Dirichlet_CamVid_ICCV_Local_36_SCE.txt", "wb") as fp_SCE:
pickle.dump(res_list_Local_SCE, fp_SCE, protocol=2)
with open("./CamVid_result/"+"Dirichlet_CamVid_ICCV_Local_36_ACE.txt", "wb") as fp_ACE:
pickle.dump(res_list_Local_ACE, fp_ACE, protocol=2)
print('ECE Local: ', np.mean(res_list_Local_ECE), np.std(res_list_Local_ECE))
print('MCE Local: ', np.mean(res_list_Local_MCE), np.std(res_list_Local_MCE))
print('SCE Local: ', np.mean(res_list_Local_SCE), np.std(res_list_Local_SCE))
print('ACE Local: ', np.mean(res_list_Local_ACE), np.std(res_list_Local_ACE))
|
the-stack_0_18680 | import json
import os
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import optuna
from optuna._experimental import experimental
from optuna._imports import try_import
with try_import() as _imports:
import allennlp
import allennlp.commands
import allennlp.common.util
from allennlp.training import EpochCallback
if _imports.is_successful():
import _jsonnet
else:
EpochCallback = object # NOQA
def dump_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save JSON config file after updating with parameters from the best trial in the study.
Args:
input_config_file:
Input Jsonnet config file used with
:class:`~optuna.integration.AllenNLPExecutor`.
output_config_file:
Output JSON config file.
study:
Instance of :class:`~optuna.study.Study`.
Note that :func:`~optuna.study.Study.optimize` must have been called.
"""
_imports.check()
best_params = study.best_params
for key, value in best_params.items():
best_params[key] = str(value)
best_config = json.loads(_jsonnet.evaluate_file(input_config_file, ext_vars=best_params))
best_config = allennlp.common.params.infer_and_cast(best_config)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
@experimental("1.4.0")
class AllenNLPExecutor(object):
"""AllenNLP extension to use optuna with Jsonnet config file.
    This feature is experimental because a major AllenNLP release is coming soon.
    The interface may change without prior notice to match that update.
See the examples of `objective function <https://github.com/optuna/optuna/blob/
master/examples/allennlp/allennlp_jsonnet.py>`_ and
`config file <https://github.com/optuna/optuna/blob/master/
examples/allennlp/classifier.jsonnet>`_.
.. note::
In :class:`~optuna.integration.AllenNLPExecutor`,
you can pass parameters to AllenNLP by either defining a search space using
Optuna suggest methods or setting environment variables just like AllenNLP CLI.
If a value is set in both a search space in Optuna and the environment variables,
the executor will use the value specified in the search space in Optuna.
Args:
trial:
A :class:`~optuna.trial.Trial` corresponding to the current evaluation
of the objective function.
config_file:
Config file for AllenNLP.
Hyperparameters should be masked with ``std.extVar``.
Please refer to `the config example <https://github.com/allenai/allentune/blob/
master/examples/classifier.jsonnet>`_.
serialization_dir:
A path which model weights and logs are saved.
metrics:
An evaluation metric for the result of ``objective``.
include_package:
Additional packages to include.
For more information, please see
`AllenNLP documentation <https://docs.allennlp.org/master/api/commands/train/>`_.
"""
def __init__(
self,
trial: optuna.Trial,
config_file: str,
serialization_dir: str,
metrics: str = "best_validation_accuracy",
*,
include_package: Optional[Union[str, List[str]]] = None
):
_imports.check()
self._params = trial.params
self._config_file = config_file
self._serialization_dir = serialization_dir
self._metrics = metrics
if include_package is None:
include_package = []
if isinstance(include_package, str):
self._include_package = [include_package]
else:
self._include_package = include_package
def _build_params(self) -> Dict[str, Any]:
"""Create a dict of params for AllenNLP.
_build_params is based on allentune's train_func.
For more detail, please refer to
https://github.com/allenai/allentune/blob/master/allentune/modules/allennlp_runner.py#L34-L65
"""
params = self._environment_variables()
params.update({key: str(value) for key, value in self._params.items()})
allennlp_params = json.loads(_jsonnet.evaluate_file(self._config_file, ext_vars=params))
        # allennlp_params may contain strings or lists of strings as values.
        # Some params cannot be cast correctly on their own, so
        # infer_and_cast converts them into the desired types.
return allennlp.common.params.infer_and_cast(allennlp_params)
@staticmethod
def _is_encodable(value: str) -> bool:
# https://github.com/allenai/allennlp/blob/master/allennlp/common/params.py#L77-L85
return (value == "") or (value.encode("utf-8", "ignore") != b"")
def _environment_variables(self) -> Dict[str, str]:
return {key: value for key, value in os.environ.items() if self._is_encodable(value)}
def run(self) -> float:
"""Train a model using AllenNLP."""
try:
import_func = allennlp.common.util.import_submodules
except AttributeError:
import_func = allennlp.common.util.import_module_and_submodules
for package_name in self._include_package:
import_func(package_name)
params = allennlp.common.params.Params(self._build_params())
allennlp.commands.train.train_model(params, self._serialization_dir)
metrics = json.load(open(os.path.join(self._serialization_dir, "metrics.json")))
return metrics[self._metrics]
@experimental("2.0.0")
class AllenNLPPruningCallback(EpochCallback):
"""AllenNLP callback to prune unpromising trials.
See `the example <https://github.com/optuna/optuna/blob/master/
examples/allennlp/allennlp_simple.py>`__
    if you want to add a pruning callback which observes a metric.
Args:
trial:
A :class:`~optuna.trial.Trial` corresponding to the current evaluation of the
objective function.
monitor:
An evaluation metric for pruning, e.g. ``validation_loss`` or
``validation_accuracy``.
"""
def __init__(self, trial: optuna.trial.Trial, monitor: str):
_imports.check()
if allennlp.__version__ < "1.0.0":
raise Exception("AllenNLPPruningCallback requires `allennlp`>=1.0.0.")
self._trial = trial
self._monitor = monitor
def __call__(
self,
trainer: "allennlp.training.GradientDescentTrainer",
metrics: Dict[str, Any],
epoch: int,
is_master: bool,
) -> None:
value = metrics.get(self._monitor)
if value is None:
return
self._trial.report(float(value), epoch)
if self._trial.should_prune():
raise optuna.TrialPruned()
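# Usage sketch (illustrative; follows the AllenNLPExecutor docstring above, and the file
# names/hyperparameter are assumptions):
#
#   def objective(trial):
#       trial.suggest_float("DROPOUT", 0.0, 0.5)
#       executor = AllenNLPExecutor(trial, "config.jsonnet", "result/" + str(trial.number))
#       return executor.run()
#
#   study = optuna.create_study(direction="maximize")
#   study.optimize(objective, n_trials=20)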
|
the-stack_0_18681 | import json
import re
from collections import defaultdict
import yara
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields.json import JSONField
import olympia.core.logger
from olympia.amo.models import ModelBase
from olympia.constants.scanners import (
ABORTED,
ABORTING,
ACTIONS,
COMPLETED,
CUSTOMS,
DELAY_AUTO_APPROVAL,
DELAY_AUTO_APPROVAL_INDEFINITELY,
FLAG_FOR_HUMAN_REVIEW,
QUERY_RULE_STATES,
MAD,
NEW,
NO_ACTION,
RESULT_STATES,
RUNNING,
SCANNERS,
SCHEDULED,
UNKNOWN,
WAT,
YARA,
)
from olympia.files.models import FileUpload
from olympia.scanners.actions import (
_delay_auto_approval,
_delay_auto_approval_indefinitely,
_flag_for_human_review,
_flag_for_human_review_by_scanner,
_no_action,
)
log = olympia.core.logger.getLogger('z.scanners.models')
class AbstractScannerResult(ModelBase):
# Store the "raw" results of a scanner.
results = JSONField(default=[])
scanner = models.PositiveSmallIntegerField(choices=SCANNERS.items())
has_matches = models.NullBooleanField()
state = models.PositiveSmallIntegerField(
choices=RESULT_STATES.items(), null=True, blank=True, default=UNKNOWN
)
version = models.ForeignKey(
'versions.Version',
related_name="%(class)ss",
on_delete=models.CASCADE,
null=True,
)
class Meta(ModelBase.Meta):
abstract = True
indexes = [
models.Index(fields=('has_matches',)),
models.Index(fields=('state',)),
]
def add_yara_result(self, rule, tags=None, meta=None):
"""This method is used to store a Yara result."""
self.results.append(
{'rule': rule, 'tags': tags or [], 'meta': meta or {}}
)
def extract_rule_names(self):
"""This method parses the raw results and returns the (matched) rule
names. Not all scanners have rules that necessarily match."""
if self.scanner == YARA:
return sorted({result['rule'] for result in self.results})
if self.scanner == CUSTOMS and 'matchedRules' in self.results:
return self.results['matchedRules']
# We do not have support for the remaining scanners (yet).
return []
def save(self, *args, **kwargs):
rule_model = self._meta.get_field('matched_rules').related_model
matched_rules = rule_model.objects.filter(
scanner=self.scanner,
name__in=self.extract_rule_names(),
# See: https://github.com/mozilla/addons-server/issues/13143
is_active=True,
)
self.has_matches = bool(matched_rules)
# Save the instance first...
super().save(*args, **kwargs)
# ...then add the associated rules.
for scanner_rule in matched_rules:
self.matched_rules.add(scanner_rule)
def get_scanner_name(self):
return SCANNERS.get(self.scanner)
def get_pretty_results(self):
return json.dumps(self.results, indent=2)
def get_files_by_matched_rules(self):
res = defaultdict(list)
        if self.scanner == YARA:
for item in self.results:
res[item['rule']].append(item['meta'].get('filename', '???'))
        elif self.scanner == CUSTOMS:
scanMap = self.results.get('scanMap', {})
for filename, rules in scanMap.items():
for ruleId, data in rules.items():
if data.get('RULE_HAS_MATCHED', False):
res[ruleId].append(filename)
return res
def can_report_feedback(self):
return self.state == UNKNOWN and self.scanner not in [WAT, MAD]
def can_revert_feedback(self):
return self.state != UNKNOWN and self.scanner not in [WAT, MAD]
def get_git_repository(self):
return {
CUSTOMS: settings.CUSTOMS_GIT_REPOSITORY,
YARA: settings.YARA_GIT_REPOSITORY,
}.get(self.scanner)
@classmethod
def run_action(cls, version):
"""Try to find and execute an action for a given version, based on the
scanner results and associated rules.
If an action is found, it is run synchronously from this method, not in
a task.
"""
log.info('Checking rules and actions for version %s.', version.pk)
try:
mad_result = cls.objects.filter(version=version, scanner=MAD).get()
customs = mad_result.results.get('scanners', {}).get('customs', {})
customs_score = customs.get('score', 0.5)
customs_models_agree = customs.get('result_details', {}).get(
'models_agree', True
)
if (
customs_score <= 0.01 or
customs_score >= 0.99 or
not customs_models_agree
):
log.info('Flagging version %s for human review by MAD.',
version.pk)
_flag_for_human_review_by_scanner(version, MAD)
except cls.DoesNotExist:
log.debug('No MAD scanner result for version %s.', version.pk)
pass
rule_model = cls.matched_rules.rel.model
result_query_name = cls._meta.get_field(
'matched_rules'
).related_query_name()
rule = (
rule_model.objects.filter(
**{f'{result_query_name}__version': version, 'is_active': True}
)
.order_by(
# The `-` sign means descending order.
'-action'
)
.first()
)
if not rule:
log.info('No action to execute for version %s.', version.pk)
return
action_id = rule.action
action_name = ACTIONS.get(action_id, None)
if not action_name:
raise Exception("invalid action %s" % action_id)
ACTION_FUNCTIONS = {
NO_ACTION: _no_action,
FLAG_FOR_HUMAN_REVIEW: _flag_for_human_review,
DELAY_AUTO_APPROVAL: _delay_auto_approval,
DELAY_AUTO_APPROVAL_INDEFINITELY: (
_delay_auto_approval_indefinitely
),
}
action_function = ACTION_FUNCTIONS.get(action_id, None)
if not action_function:
raise Exception("no implementation for action %s" % action_id)
# We have a valid action to execute, so let's do it!
log.info(
'Starting action "%s" for version %s.', action_name, version.pk
)
action_function(version)
log.info('Ending action "%s" for version %s.', action_name, version.pk)
class AbstractScannerRule(ModelBase):
name = models.CharField(
max_length=200,
help_text=_('This is the exact name of the rule used by a scanner.'),
)
scanner = models.PositiveSmallIntegerField(choices=SCANNERS.items())
action = models.PositiveSmallIntegerField(
choices=ACTIONS.items(), default=NO_ACTION
)
is_active = models.BooleanField(
default=True,
help_text=_(
'When unchecked, the scanner results will not be bound to this '
'rule and the action will not be executed.'
),
)
definition = models.TextField(null=True, blank=True)
class Meta(ModelBase.Meta):
abstract = True
unique_together = ('name', 'scanner')
@classmethod
def get_yara_externals(cls):
"""
Return a dict with the various external variables we inject in every
yara rule automatically and their default values.
"""
return {
'is_json_file': False,
'is_manifest_file': False,
'is_locale_file': False,
}
def __str__(self):
return self.name
def clean(self):
if self.scanner == YARA:
self.clean_yara()
def clean_yara(self):
if not self.definition:
raise ValidationError(
{'definition': _('Yara rules should have a definition')}
)
if 'rule {}'.format(self.name) not in self.definition:
raise ValidationError(
{
'definition': _(
'The name of the rule in the definition should match '
'the name of the scanner rule'
)
}
)
if len(re.findall(r'rule\s+.+?\s+{', self.definition)) > 1:
raise ValidationError(
{
'definition': _(
'Only one Yara rule is allowed in the definition'
)
}
)
try:
yara.compile(
source=self.definition, externals=self.get_yara_externals()
)
except yara.SyntaxError as syntaxError:
raise ValidationError(
{
'definition': _('The definition is not valid: %(error)s')
% {'error': syntaxError}
}
)
except Exception:
raise ValidationError(
{
'definition': _(
'An error occurred when compiling the definition'
)
}
)
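# Example (illustrative only) of a definition accepted by clean_yara() above for a rule
# named "suspicious_eval": exactly one yara rule, whose name matches the model's name.
#
#   rule suspicious_eval
#   {
#       strings:
#           $eval = "eval("
#       condition:
#           $eval
#   }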
class ScannerRule(AbstractScannerRule):
class Meta(AbstractScannerRule.Meta):
db_table = 'scanners_rules'
class ScannerResult(AbstractScannerResult):
upload = models.ForeignKey(
FileUpload,
related_name="%(class)ss", # scannerresults
on_delete=models.SET_NULL,
null=True,
)
matched_rules = models.ManyToManyField(
'ScannerRule', through='ScannerMatch', related_name='results'
)
# The value is a decimal between 0 and 1. `-1` is a special value to
# indicate an error or no score available.
score = models.DecimalField(
null=True, blank=True, max_digits=6, decimal_places=5, default=-1
)
class Meta(AbstractScannerResult.Meta):
db_table = 'scanners_results'
constraints = [
models.UniqueConstraint(
fields=('upload', 'scanner', 'version'),
name='scanners_results_upload_id_scanner_'
'version_id_ad9eb8a6_uniq',
)
]
class ScannerMatch(ModelBase):
result = models.ForeignKey(ScannerResult, on_delete=models.CASCADE)
rule = models.ForeignKey(ScannerRule, on_delete=models.CASCADE)
class ImproperScannerQueryRuleStateError(ValueError):
pass
class ScannerQueryRule(AbstractScannerRule):
scanner = models.PositiveSmallIntegerField(
choices=((YARA, 'yara'),), # For now code search only allows yara.
default=YARA,
)
state = models.PositiveSmallIntegerField(
choices=QUERY_RULE_STATES.items(), default=NEW
)
run_on_disabled_addons = models.BooleanField(
default=False,
help_text=_(
'Run this rule on add-ons that have been '
'force-disabled as well.'
),
)
celery_group_result_id = models.UUIDField(default=None, null=True)
task_count = models.PositiveIntegerField(default=0)
class Meta(AbstractScannerRule.Meta):
db_table = 'scanners_query_rules'
def change_state_to(self, target):
"""Immediately change state of the rule in database or raise
ImproperScannerQueryRuleStateError."""
prereqs = {
# New is the default state.
NEW: (),
# Scheduled should only happen through the admin. It's the
# prerequisite to running the task.
SCHEDULED: (NEW,),
# Running should only happen through the task, after we went
# through the admin to schedule the query.
RUNNING: (SCHEDULED,),
# Aborting can happen from various states.
ABORTING: (NEW, SCHEDULED, RUNNING),
# Aborted should only happen after aborting.
ABORTED: (ABORTING,),
# Completed should only happen through the task
COMPLETED: (RUNNING,),
}
if self.state in prereqs[target]:
self.update(state=target)
else:
raise ImproperScannerQueryRuleStateError()
def _get_completed_tasks_count(self):
if self.celery_group_result_id is not None:
from olympia.amo.celery import app as celery_app
result = celery_app.GroupResult.restore(
str(self.celery_group_result_id)
)
if result:
return result.completed_count()
return None
def completion_rate(self):
if self.state == RUNNING:
completed_tasks_count = self._get_completed_tasks_count()
if completed_tasks_count is not None and self.task_count:
rate = (completed_tasks_count / self.task_count) * 100
return '{:.2f}%'.format(rate)
return None
class ScannerQueryResult(AbstractScannerResult):
# Has to be overridden, because the parent refers to ScannerMatch.
matched_rules = models.ManyToManyField(
'ScannerQueryRule', through='ScannerQueryMatch', related_name='results'
)
class Meta(AbstractScannerResult.Meta):
db_table = 'scanners_query_results'
# FIXME indexes, unique constraints ?
class ScannerQueryMatch(ModelBase):
result = models.ForeignKey(ScannerQueryResult, on_delete=models.CASCADE)
rule = models.ForeignKey(ScannerQueryRule, on_delete=models.CASCADE)
class VersionScannerFlags(ModelBase):
version = models.OneToOneField(
'versions.Version', primary_key=True, on_delete=models.CASCADE
)
needs_human_review_by_mad = models.BooleanField(default=False)
@property
def needs_human_review(self):
return self.needs_human_review_by_mad
|
the-stack_0_18683 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from pylib.base import base_test_result
def GenerateResultsDict(test_run_results):
"""Create a results dict from |test_run_results| suitable for writing to JSON.
Args:
test_run_results: a list of base_test_result.TestRunResults objects.
Returns:
A results dict that mirrors the one generated by
base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
"""
# Example json output.
# {
# "global_tags": [],
# "all_tests": [
# "test1",
# "test2",
# ],
# "disabled_tests": [],
# "per_iteration_data": [
# {
# "test1": [
# {
# "status": "SUCCESS",
# "elapsed_time_ms": 1,
# "output_snippet": "",
# "output_snippet_base64": "",
# "losless_snippet": "",
# },
# ],
# "test2": [
# {
# "status": "FAILURE",
# "elapsed_time_ms": 12,
# "output_snippet": "",
# "output_snippet_base64": "",
# "losless_snippet": "",
# },
# ],
# },
# {
# "test1": [
# {
# "status": "SUCCESS",
# "elapsed_time_ms": 1,
# "output_snippet": "",
# "output_snippet_base64": "",
# "losless_snippet": "",
# },
# ],
# "test2": [
# {
# "status": "FAILURE",
# "elapsed_time_ms": 12,
# "output_snippet": "",
# "output_snippet_base64": "",
# "losless_snippet": "",
# },
# ],
# },
# ...
# ],
# }
def status_as_string(s):
if s == base_test_result.ResultType.PASS:
return 'SUCCESS'
elif s == base_test_result.ResultType.SKIP:
return 'SKIPPED'
elif s == base_test_result.ResultType.FAIL:
return 'FAILURE'
elif s == base_test_result.ResultType.CRASH:
return 'CRASH'
elif s == base_test_result.ResultType.TIMEOUT:
return 'TIMEOUT'
elif s == base_test_result.ResultType.UNKNOWN:
return 'UNKNOWN'
all_tests = set()
per_iteration_data = []
for test_run_result in test_run_results:
iteration_data = {
t.GetName(): [{
'status': status_as_string(t.GetType()),
'elapsed_time_ms': t.GetDuration(),
'output_snippet': '',
'losless_snippet': '',
            'output_snippet_base64': '',
}]
for t in test_run_result.GetAll()
}
all_tests = all_tests.union(set(iteration_data.iterkeys()))
per_iteration_data.append(iteration_data)
return {
'global_tags': [],
'all_tests': sorted(list(all_tests)),
# TODO(jbudorick): Add support for disabled tests within base_test_result.
'disabled_tests': [],
'per_iteration_data': per_iteration_data,
}
def GenerateJsonResultsFile(test_run_result, file_path):
"""Write |test_run_result| to JSON.
This emulates the format of the JSON emitted by
base/test/launcher/test_results_tracker.cc:SaveSummaryAsJSON.
Args:
test_run_result: a base_test_result.TestRunResults object.
file_path: The path to the JSON file to write.
"""
with open(file_path, 'w') as json_result_file:
json_result_file.write(json.dumps(GenerateResultsDict(test_run_result)))
def ParseResultsFromJson(json_results):
"""Creates a list of BaseTestResult objects from JSON.
Args:
json_results: A JSON dict in the format created by
GenerateJsonResultsFile.
"""
def string_as_status(s):
if s == 'SUCCESS':
return base_test_result.ResultType.PASS
elif s == 'SKIPPED':
return base_test_result.ResultType.SKIP
elif s == 'FAILURE':
return base_test_result.ResultType.FAIL
elif s == 'CRASH':
return base_test_result.ResultType.CRASH
elif s == 'TIMEOUT':
return base_test_result.ResultType.TIMEOUT
else:
return base_test_result.ResultType.UNKNOWN
results_list = []
testsuite_runs = json_results['per_iteration_data']
for testsuite_run in testsuite_runs:
for test, test_runs in testsuite_run.iteritems():
results_list.extend(
[base_test_result.BaseTestResult(test,
string_as_status(tr['status']),
duration=tr['elapsed_time_ms'])
for tr in test_runs])
return results_list
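# Illustrative round trip (not part of the original module); it assumes the
# usual helpers on base_test_result (`TestRunResults.AddResult`,
# `BaseTestResult(name, type, duration=...)`) and a hypothetical output path:
#
#   run = base_test_result.TestRunResults()
#   run.AddResult(base_test_result.BaseTestResult(
#       'org.chromium.FooTest#testBar', base_test_result.ResultType.PASS,
#       duration=12))
#   GenerateJsonResultsFile([run], '/tmp/results.json')
#   with open('/tmp/results.json') as f:
#     parsed = ParseResultsFromJson(json.load(f))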
|
the-stack_0_18687 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteTable(Resource):
"""Route table resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param routes: Collection of routes contained within a route table.
:type routes: list[~azure.mgmt.network.v2017_06_01.models.Route]
:ivar subnets: A collection of references to subnets.
:vartype subnets: list[~azure.mgmt.network.v2017_06_01.models.Subnet]
:param provisioning_state: The provisioning state of the resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'subnets': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'routes': {'key': 'properties.routes', 'type': '[Route]'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, routes=None, provisioning_state: str=None, etag: str=None, **kwargs) -> None:
super(RouteTable, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.routes = routes
self.subnets = None
self.provisioning_state = provisioning_state
self.etag = etag
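# Illustrative usage sketch (not part of the generated code); it assumes the
# sibling generated `Route` model with `name`/`address_prefix`/`next_hop_type`
# and uses hypothetical values:
#
#   table = RouteTable(
#       location='westus',
#       tags={'env': 'test'},
#       routes=[Route(name='default', address_prefix='0.0.0.0/0',
#                     next_hop_type='Internet')])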
|
the-stack_0_18688 | from subprocess import call, PIPE, Popen
from sys import stdin, stdout, platform, version_info
from os import system
from termios import tcgetattr, tcsetattr, TCSADRAIN
from time import sleep
from tty import setcbreak
from dialog import Dialog
d = Dialog(dialog="dialog")
d.set_background_title("036 Creative Studios")
LANGUAGE: int = 0
def main() -> None: utils.clear(); language(); cover(); verify(); menu()
def printer(type: str, position: int) -> None:
GREEN = '\033[92m'; WARNING = '\033[93m'; FAIL = '\033[91m'; ENDC = '\033[0m'
DICTIONARY_ENG=(
"Your Operating System is not GNU/Linux, exiting",
"In this system the binary sudo doesn't exist.",
"e4defrag binary is not present in this system, please install",
"fsck.ext4 is not present in this system, please install",
"The dialog binary is not available in this system, please install",
"All dependencies is ok!",
"There's not ext4 partitions available, only works with USB devices",
"All the ext4 partitions are mounted in your system, please unmount the desired partition to optimize",
"=============== VERIFY FILESYSTEM ERRORS =============== \n",
"=============== FAILURE =============== \n",
"=============== OK =============== \n",
"=============== OPTIMIZE FILESYSTEM =============== \n",
"=============== DEFRAG FILESYSTEM, PLEASE WAIT =============== \n",
"=============== LAST VERIFY FILESYSTEM =============== \n",
"Your Python versión is less than 3.5, exiting",
"You need to be root, execute with sudo"
)
DICTIONARY_ESP=(
"Este sistema no es GNU/Linux, saliendo",
"En este sistema no existe el binario de superusuario.",
"El ejecutable e4defrag no está presente en tu sistema, por favor instalalo",
"fsck.ext4 no está presente en tu sistema, por favor instalalo",
"dialog no está presente en tu sistema, por favor instalalo",
"Todo ok!",
"No hay particiones ext4 disponibles en el sistema, solofunciona con dispositivos USB",
"Todas las particiones ext4 estan montadas en el sistema, por favor desmontar la particion deseada para optimizar",
"=============== VERIFICAR ERRORES EN EL SISTEMA DE ARCHIVOS =============== \n",
"=============== FALLA =============== \n",
"=============== LISTO =============== \n",
"=============== OPTIMIZAR EL SISTEMA DE ARCHIVOS =============== \n",
"=============== DESFRAGMENTAR EL SISTEMA DE ARCHIVOS, ESPERE POR FAVOR =============== \n",
"=============== VERIFICAR POR ULTIMA VEZ EL SISTEMA DE ARCHIVOS =============== \n",
"Tu versión de Python es menor que 3.5, saliendo",
"Necesitas ser superusuario, ejecuta con sudo"
)
if LANGUAGE == 1:
if type == "print": print(f"{DICTIONARY_ENG[position]}")
elif type == "info": print(f"[{GREEN}+{ENDC}] INFO: {DICTIONARY_ENG[position]}")
elif type == "warn": print(f"[{WARNING}*{ENDC}] WARNING: {DICTIONARY_ENG[position]}")
elif type == "error": print(f"[{FAIL}!{ENDC}] ERROR: {DICTIONARY_ENG[position]}")
else: print(f"[?] UNKNOWN: {DICTIONARY_ENG[position]}")
else:
if type == "print": print(f"{DICTIONARY_ESP[position]}")
elif type == "info": print(f"[{GREEN}+{ENDC}] INFO: {DICTIONARY_ESP[position]}")
elif type == "warn": print(f"[{WARNING}*{ENDC}] WARNING: {DICTIONARY_ESP[position]}")
elif type == "error": print(f"[{FAIL}!{ENDC}] ERROR: {DICTIONARY_ESP[position]}")
else: print(f"[?] UNKNOWN: {DICTIONARY_ESP[position]}")
def reader(position: int) -> str:
DICTIONARY_ENG=(
"Choose a Option\n",
"Optimize a ext4 partition",
"Exit to the shell",
"Please select a partition" ,
"Press Enter to continue...",
"Optimize",
"Exit"
)
DICTIONARY_ESP=(
"Seleccione una opcion\n",
"Optimizar una particion de tipo ext4",
"Salir al shell",
"Por favor selecciona una partition",
"Presione Enter para continuar...",
"Optimizar",
"Salir"
)
if LANGUAGE == 1: return DICTIONARY_ENG[position]
else: return DICTIONARY_ESP[position]
def commandverify(cmd: str) -> bool:
return call("type " + cmd, shell=True, stdout=PIPE, stderr=PIPE) == 0
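# Note (added for clarity, not in the original script): `type` is a shell
# builtin, so commandverify("e4defrag") is True only when the shell can
# resolve that name; the command's output is discarded via the PIPE arguments.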
def language() -> None:
global LANGUAGE
print("Bienvenido / Welcome")
print("Please, choose your language / Por favor selecciona tu idioma")
print("1) English"); print("2) Espanol")
option: str = utils.char()
if option == "1": LANGUAGE=1
elif option == "2": LANGUAGE=2
else: exit(1)
def cover() -> None:
utils.clear()
print(r''' `"~>v??*^;rikD&MNBQku*;` ''')
print(r''' `!{wQNWWWWWWWWWWWWWWWNWWWWWWNdi^` ''')
print(r''' .v9NWWWWNRFmWWWWWWWWWWWWga?vs0pNWWWMw! ''')
print(r''' !9WWWWWWU>`>&WWWWWWUH!_JNWWWWWQz ^EWWWWg| ''')
print(r''' _SWWWWWNe: /RWWWWWWNNHBRuyix&WWWWWg2?-"VNWWW6_ ''')
print(r''' "kWWWWWNz. .zNWWWWWWw=, ^NsLQNW**MWWWW&WQJuNWWWNr. ''')
print(r''' .FNWWWWNu. rL&WWWWWWg!!*;^Jo!*BN0aFx)>|!;;;;;!~\r)xFwaao?|, ''')
print(r''' .sNWWWWMi` -,#WWWWWWNi"` Siwu UWv .;^|^;` .!*lUSF*; ''')
print(r''' )BWWWWWo. 9NWWWWWW0; ;PvLc*aU&^ |L=-``.;>*= ;)wmkL_ ''')
print(r''' _QWWWWWq" .aWWWWWWWs` rF<>\^gQ, /i ,;;. !2 ,*k0F\` ''')
print(r''' *NWWWWNv ,/&WWWWWWNr "!SL92l)BU. ^x x. L, I_ `>P&F; ''')
print(r''' `2WWWWWg; !BWWWWWWD" .s;!\xNa /L, !L` P, .?&gr ''')
print(r''' ,QWWWWWS` >;LWWWWWWWk`_;!\u| ^Ml ;~!^, `iv `?Ng^ ''')
print(r''' ^BWWWWWi *i7NWWWWWWc "a;;?ii"~NV `;?}, ,9WF ''')
print(r''' >WWWWWB! ` ;8WWWWWWM= r>`;F/2wNc .;||!, oW#.''')
print(r''' ?WWWWW#" `2;7NWWWWW&_ =_=u%ir`>Wi PW6''')
print(r''' rWWWWWc `||>WWWWWWU. r^?7;!v*W) ,WW|''')
print(r''' ^NWWWB! ! \jrmWWWWWw `vL.k*\vkW$>rr*r;` ;rL{7)>!` mWF''')
print(r''' .BWWW$, ,u. PWWWWW) ,r`)|)!__LWv `;L" |s>:```._|JuL qWE''')
print(r''' uWWWH` .vi"Fo*WWWWN> ^v r*`>W} &Ws ''')
print(r''' ;WWWP` `=*ox_pWWWB; ^)i`9xr,#7W* . ,\*` |WW! ''')
print(r''' SWWD` >LLr^_y*NWWQ" ,<?P~|iF0W} ~; v_ `o; .0WU''')
print(r''' ^WW0,.!F2xULFi5WW0` >7vr!!z_`*Wv `|;;^!,~!` .8W8.''')
print(r''' dWN;`>JyrkIr`!NWN! ,uFia!9?*2WI ;QWD.''')
print(r''' =WW7`_S)~Fxv| xWWi ;}drqa=;=uWRNmL, rWWt` ''')
print(r''' DWP`;LiL;}c*rsWW&`,Po_e7L/ =Nc `>oD$aaw%ouic7)*r>=|^^~!;;;;;;;;;;;;;~^\>rvL{JctxiiiiuusoF2kgBS/ ''')
print(r''' ;WN\\Uy>*rF.,pWWWr-;?J"vov^^Nu `.,"_;!~^\=>r*v?LL{}Jjjjjjj}}7?vr>\^!;____-""",,,..`` ''')
print(r''' iW?_**>^;>"~&EeWg=|liv*s!~?NL''')
print(r''' wWc*$>*~~L6Ni QW! \Uursx >WJ''')
print(r''' 2M)o*_F "R0; .Wd~U7,``;*iN>''')
print(r''' xWe?vI7cMu` ,W&>xssr~=PB|''')
print(r''' "W% ,cBZ_ `M2l\/i,,QQ,''')
print(r''' |U$di_ UBu>i)yBy`''')
print(r''' ^Wx,rDR!''')
print(r''' \ZUl^''')
print(r'''.oPYo. .oPYo. .pPYo. .oPYo. o o .oPYo. o 8 o ''')
print(r'''8 .o8 `8 8 8 8 8 8 8 8 ''')
print(r'''8 .P`8 .oP` 8oPYo. 8 oPYo. .oPYo. .oPYo. o8P o8 o o .oPYo. `Yooo. o8P o o .oPYo8 o8 .oPYo. .oPYo. ''')
print(r'''8.d` 8 `b. 8` `8 8 8 `` 8oooo8 .oooo8 8 8 Y. .P 8oooo8 `8 8 8 8 8 8 8 8 8 Yb.. ''')
print(r'''8o` 8 :8 8. .P 8 8 8 8. 8 8 8 8 `b..d` 8. 8 8 8 8 8 8 8 8 8 `Yb. ''')
print(r'''`YooP` `YooP` `YooP` `YooP` 8 `Yooo` `YooP8 8 8 `YP` `Yooo` `YooP` 8 `YooP` `YooP` 8 `YooP` `YooP. ''')
print(r''':.....::.....::.....::::.....:..:::::.....::.....:::..::..::...:::.....::::.....:::..::.....::.....::..:.....::.....:''')
print(r''':::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::''')
print(r'''::::::::::::::::::: ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::''')
def verify() -> None:
if version_info < (3, 5):
utils.clear(); printer("error",8); exit(1)
if platform != "linux":
utils.clear(); printer("error",0); exit(1)
if not commandverify("e4defrag"):
utils.clear(); printer("error",2); exit(1)
if not commandverify("fsck.ext4"):
utils.clear(); printer("error",3); exit(1)
if not commandverify("dialog"):
utils.clear(); printer("error",4); exit(1)
ext4listener(); printer("print",5)
spinner = utils.spinning()
for _ in range(15):
stdout.write(next(spinner))
stdout.flush(); sleep(0.1)
stdout.write('\b')
utils.clear()
def ext4listener(menuable: str = "", echoparts: str = "") -> list:
COUNT: int = 0; EXTCOUNT: int = 0; MOUNTCOUNT: int = 0
ABSOLUTEPARTS: str = ""; DIRTYDEVS: list = []
EXTPARTS: list = []; PARTS: list = []
    UMOUNTS: list = []
ROOT: str = Popen(r"""df -h | sed -ne '/\/$/p' | cut -d" " -f1
""", shell=True, stdout=PIPE).stdout.read().decode('utf-8').replace("\n", "")
VERIFY: str = Popen(r"""find /dev/disk/by-id/ | sort -n | sed 's/^\/dev\/disk\/by-id\///'
""", shell=True, stdout=PIPE).stdout.read().decode('utf-8').split("\n")
for DEVICE in VERIFY:
DIRTYDEVS.append(Popen(f'readlink "/dev/disk/by-id/{DEVICE}"', shell=True, stdout=PIPE)
.stdout.read().decode('utf-8').rstrip().split("\n")[0])
DIRTYDEVS = list(filter(('').__ne__, DIRTYDEVS))
for DEV in DIRTYDEVS:
ABSOLUTEPARTS = Popen(f"""
echo {DEV} | sed 's/^\.\.\/\.\.\//\/dev\//' | sed '/.*[[:alpha:]]$/d' | sed '/blk[[:digit:]]$/d'""",
shell=True, stdout=PIPE).stdout.read().decode('utf-8').rstrip()
if ABSOLUTEPARTS != "":
if ABSOLUTEPARTS != ROOT:
PARTS.append(Popen(f"echo {DEV} | sed 's/^\.\.\/\.\.\///' | sed '/.*[[:alpha:]]$/d' | sed '/blk[[:digit:]]$/d'",
shell=True, stdout=PIPE).stdout.read()
.decode('utf-8').rstrip().split("\n")[0]); COUNT += 1
for PART in PARTS:
TYPE: str = Popen(f'''lsblk -f /dev/{PART} | sed -ne '2p' | cut -d " " -f2''',
shell=True, stdout=PIPE).stdout.read().decode('utf-8').rstrip()
if TYPE == "ext4": EXTCOUNT += 1; EXTPARTS.append(PART)
if (EXTCOUNT == 0):
utils.clear(); printer("error",6)
if menuable == "menu" : input(reader(4)); return
else: exit(1)
for PARTITIONSDEF in EXTPARTS:
MOUNTED: str = Popen(f"lsblk /dev/{PARTITIONSDEF} | sed -ne '/\//p'",
shell=True, stdout=PIPE).stdout.read().decode('utf-8').rstrip()
if MOUNTED != "": MOUNTCOUNT += 1
else: UMOUNTS.append([f"/dev/{PARTITIONSDEF}","ext4"])
if MOUNTCOUNT == EXTCOUNT :
utils.clear(); printer("error",7)
if(menuable == "menu"): input(reader(4)); return
else: exit(1)
if echoparts == "print": return UMOUNTS
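# Illustrative return shape (not part of the original script): when called as
# ext4listener("menu", "print") with two unmounted ext4 partitions present,
# UMOUNTS would look like
#   [["/dev/sdb1", "ext4"], ["/dev/sdc1", "ext4"]]
# which is exactly the (tag, item) pair list that dialog's menu() expects.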
def menu() -> None:
choices = [(reader(5),reader(1)),(reader(6),reader(2))]
response = d.menu(reader(0), 15, 60, 4, choices)
if(response[0] == "ok" and response[1] == reader(5)):
defragmenu()
else: exit(0)
def defragmenu() -> None:
utils.clear()
choices = ext4listener("menu","print")
response = d.menu(reader(0), 15, 50, 4, choices)
if(response[0] == "ok"):
defragaction(response[1])
else: exit(0)
def defragaction(part: str) -> None:
utils.clear()
if part == "": return
printer("print",8)
if call("sudo cat < /dev/null", shell=True) == 0:
if call(f"sudo fsck.ext4 -y -f -v {part}", shell=True) != 8:
print(" "); printer("print",10); input(reader(4)); utils.clear()
else: printer("print",9); input(reader(4)); menu(); return
printer("print",11)
if call(f"sudo fsck.ext4 -y -f -v -D {part}", shell=True) != 8:
print(" "); printer("print",10); input(reader(4)); utils.clear()
else: printer("print",9); input(reader(4)); menu(); return
system("mkdir /tmp/optimize 2> /dev/null")
system(f"sudo mount {part} /tmp/optimize")
printer("print",12); system(f"sudo e4defrag -v {part}")
print(" ")
system(f"sudo umount {part}"); printer("print",10)
input(reader(4)); utils.clear()
printer("print",13)
if call(f"sudo fsck.ext4 -y -f -v {part}", shell=True) != 8:
print(" "); printer("print",10); input(reader(4)); utils.clear(); menu()
else: printer("print",9); input(reader(4)); menu(); return
else: printer("print",9); input(reader(4)); menu(); return
class utils:
def clear() -> None: system('clear')
def char() -> str:
fd = stdin.fileno()
oldSettings = tcgetattr(fd)
try:
setcbreak(fd)
answer = stdin.read(1)
finally:
tcsetattr(fd, TCSADRAIN, oldSettings)
return answer
def spinning():
while True:
for cursor in '|/-\\':
yield cursor
if __name__ == "__main__":
main()
|
the-stack_0_18690 | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
from typing import List
from vdk.internal.core.config import Configuration
from vdk.internal.core.config import ConfigurationBuilder
from vdk.internal.util.utils import parse_config_sequence
PROPERTIES_DEFAULT_TYPE = "PROPERTIES_DEFAULT_TYPE"
PROPERTIES_WRITE_PREPROCESS_SEQUENCE = "PROPERTIES_WRITE_PREPROCESS_SEQUENCE"
class PropertiesConfiguration:
def __init__(self, config: Configuration):
self.__config = config
def get_properties_default_type(self) -> str:
return self.__config.get_value(PROPERTIES_DEFAULT_TYPE)
def get_properties_write_preprocess_sequence(self) -> List[str]:
return parse_config_sequence(
self.__config, key=PROPERTIES_WRITE_PREPROCESS_SEQUENCE, sep=","
)
def add_definitions(config_builder: ConfigurationBuilder):
config_builder.add(
key=PROPERTIES_DEFAULT_TYPE,
default_value=None,
description="Set the default properties type to use. "
"Plugins can register different properties types. "
"This option controls which is in use"
"It can be left empty, in which case "
"if there is only one type registered it will use it."
"Or it will use one register with type 'default' ",
)
config_builder.add(
key="PROPERTIES_WRITE_PREPROCESS_SEQUENCE",
default_value=None,
description="""A string of comma-separated property types.
Those types are priorly registered in the IPropertiesRegistry, by
mapping a factory for instantiating each IPropertiesServiceClient
property type handler.
This comma-separated string value indicates the sequence in which those
IPropertiesServiceClient implementations `write_properties` method
will be invoked. For example:
PROPERTIES_WRITE_PREPROCESS_SEQUENCE="a-prefixed-property,
replicated-property"
would mean that the properties data stored would be first
processed by the `a-prefixed-property` client, then by the
`replicated-property` client, and finally would be stored by
the default properties client.
In case of pre-processing failure, the default client won't be invoked.
""",
)
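# Illustrative sketch (not part of the original module) of how the options
# above might be consumed once the builder has produced a Configuration;
# `build()` is assumed to be the ConfigurationBuilder method that does so:
#
#   config = config_builder.build()
#   props_config = PropertiesConfiguration(config)
#   default_type = props_config.get_properties_default_type()
#   for properties_type in props_config.get_properties_write_preprocess_sequence():
#       ...  # resolve each IPropertiesServiceClient in write order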
|
the-stack_0_18694 | import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_helper
from unittest.mock import Mock
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
import smtpd
HOST = socket_helper.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except TimeoutError:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests:
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port)
client.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(client.source_address, ('127.0.0.1', 19876))
client.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
client = self.client("%s:%s" % (HOST, self.port))
client.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
client = self.client(HOST, self.port, local_hostname="testhost")
self.assertEqual(client.local_hostname, "testhost")
client.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
client = self.client(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
client = self.client(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
mock_socket.reply_with(b"220 Hola mundo")
with self.assertRaises(ValueError):
self.client(HOST, self.port, timeout=0)
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
client = self.client(HOST, self.port, timeout=30)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(1)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(2)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
class SMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.SMTP
class LMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.LMTP
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), "test requires Unix domain socket")
def testUnixDomainSocketTimeoutDefault(self):
local_host = '/some/local/lmtp/delivery/program'
mock_socket.reply_with(b"220 Hello world")
try:
client = self.client(local_host, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
super().testTimeoutZero()
local_host = '/some/local/lmtp/delivery/program'
with self.assertRaises(ValueError):
self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except TimeoutError:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = socket_helper.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testELHO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def test_issue43124_putcmd_escapes_newline(self):
# see: https://bugs.python.org/issue43124
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError) as exc:
smtp.putcmd('helo\nX-INJECTED')
self.assertIn("prohibited newline characters", str(exc.exception))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('[email protected]'), expected)
self.assertEqual(smtp.verify('[email protected]'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def test_issue43124_escape_localhostname(self):
# see: https://bugs.python.org/issue43124
# connect and send mail
m = 'wazzuuup\nlinetwo'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='hi\nX-INJECTED',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError) as exc:
smtp.sendmail("[email protected]", "[email protected]", m)
self.assertIn(
"prohibited newline characters: ehlo hi\\nX-INJECTED",
str(exc.exception),
)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
debugout = smtpd.DEBUGSTREAM.getvalue()
self.assertNotIn("X-INJECTED", debugout)
def test_issue43124_escape_options(self):
# see: https://bugs.python.org/issue43124
# connect and send mail
m = 'wazzuuup\nlinetwo'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail("[email protected]", "[email protected]", m)
with self.assertRaises(ValueError) as exc:
smtp.mail("[email protected]", ["X-OPTION\nX-INJECTED-1", "X-OPTION2\nX-INJECTED-2"])
msg = str(exc.exception)
self.assertIn("prohibited newline characters", msg)
self.assertIn("X-OPTION\\nX-INJECTED-1 X-OPTION2\\nX-INJECTED-2", msg)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
debugout = smtpd.DEBUGSTREAM.getvalue()
self.assertNotIn("X-OPTION", debugout)
self.assertNotIn("X-OPTION2", debugout)
self.assertNotIn("X-INJECTED-1", debugout)
self.assertNotIn("X-INJECTED-2", debugout)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<[email protected]>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='[email protected]', to_addrs='[email protected]')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'[email protected]'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('[email protected]', 'Jeff', '[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = '[email protected]'
m['Resent-From'] = 'Martha <[email protected]>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <fő[email protected]>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = socket_helper.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'[email protected]':'John A',
'[email protected]':'Sally B',
'[email protected]':'Ruth C',
}
sim_auth = ('[email protected]', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['[email protected]','[email protected]'],
'list-2':['[email protected]',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_buggy(self, arg=None):
# This AUTH mechanism will 'trap' client in a neverending 334
# base64 encoded 'BuGgYbUgGy'
self.push('334 QnVHZ1liVWdHeQ==')
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
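    # For reference (illustrative, not part of the original test): the client
    # side that `_auth_cram_md5` mirrors computes roughly
    #   digest = hmac.HMAC(password.encode('ascii'),
    #                      base64.decodebytes(challenge), 'md5').hexdigest()
    #   response = base64-encoded (user + " " + digest)
    # so the server recomputes the same HMAC over its own challenge and
    # compares hex digests.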
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = '[email protected]'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN_initial_response_ok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=True)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_LOGIN_initial_response_notok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=False)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_BUGGY(self):
self.serv.add_feature("AUTH BUGGY")
def auth_buggy(challenge=None):
self.assertEqual(b"BuGgYbUgGy", challenge)
return "\0"
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT
)
try:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_buggy")
expect = r"^Server AUTH mechanism infinite loop.*"
with self.assertRaisesRegex(smtplib.SMTPException, expect) as cm:
smtp.auth("BUGGY", auth_buggy, initial_response_ok=False)
finally:
smtp.close()
@hashlib_helper.requires_hashdigest('md5', openssl=True)
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5', openssl=True)
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rest_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
            smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
message = EmailMessage()
        message['From'] = email.utils.formataddr(('Michaël', 'michael@example.com'))
        message['To'] = email.utils.formataddr(('René', 'rene@example.com'))
self.assertDictEqual(smtp.send_message(message), {})
        self.assertEqual(self.serv._addresses['from'], 'michael@example.com')
        self.assertEqual(self.serv._addresses['tos'], ['rene@example.com'])
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received /r/n to /n, so we can't easily test that
# we are successfully sending /r/n :(.
expected = textwrap.dedent("""\
            From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
        self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
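# Per RFC 4616, an AUTH PLAIN initial response is base64("\0" + user + "\0" +
# password) with an empty authorization identity; the tests below log in as
# ('psu', 'doesnotexist'), which is what this constant encodes.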
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
        # PLAIN does, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_18695 | # Copyright (c) 2021 Cristian Patrasciuc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import multiprocessing
import os.path
import random
import time
from typing import List, Dict
import pandas
from pandas import DataFrame
from statsmodels.stats.proportion import proportion_confint
from ai.eval.players import PLAYER_NAMES
from ai.player import Player
from main_wrapper import main_wrapper
from model.bummerl import Bummerl
from model.game_state import GameState
from model.player_id import PlayerId
from model.player_pair import PlayerPair
MetricsDict = Dict[str, PlayerPair]
_DATAFRAME_METRICS = ["bummerls", "games", "game_points", "trick_points"]
def _get_metrics_column_names():
columns = []
for metric_name in _DATAFRAME_METRICS:
columns.extend([f"{metric_name}_one", f"{metric_name}_two"])
return columns
def _get_results_row(metrics):
row = []
for metric_name in _DATAFRAME_METRICS:
metric_value = metrics.get(metric_name, PlayerPair(None, None))
row.extend([metric_value.one, metric_value.two])
return row
def _accumulate_player_pair(acc: PlayerPair[int], other: PlayerPair[int]):
acc.one += other.one
acc.two += other.two
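# Formats a 95% Wilson score confidence interval for player one's share of the
# totals in `pair`; the string is colored green when the whole interval lies
# above 50% and red when it lies below 50% (via ANSI escape codes).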
def _prop_confidence_interval(pair: PlayerPair[int]):
nobs = pair.one + pair.two
count = pair.one
ci_low, ci_upp = proportion_confint(count, nobs, alpha=0.05, method='wilson')
begin = ""
if ci_low > 0.5:
begin = "\033[92m" # green
elif ci_upp < 0.5:
begin = "\033[91m" # red
end = "\033[0m" if begin != "" else ""
low = "{:.2%}".format(ci_low)
upp = "{:.2%}".format(ci_upp)
mean = "{:.2%}".format(count / nobs)
return f"{begin}{mean} [{low}, {upp}]{end}"
def _print_pair(label: str, pair: PlayerPair, compute_ci: bool = True):
ci_text = ""
if compute_ci and pair != PlayerPair(0, 0):
ci_text = _prop_confidence_interval(pair)
print(f"{label}: {pair.one}:{pair.two} {ci_text}")
def _print_metrics(metrics: MetricsDict):
for metric_name, metric_value in metrics.items():
compute_ci = metric_name in ["bummerls", "games", "bummerls_of_interest",
"games_of_interest"]
_print_pair(metric_name, metric_value, compute_ci)
print()
def accumulate_metrics(acc_metrics: MetricsDict, metrics: MetricsDict):
for metric_name in acc_metrics.keys():
acc_metrics[metric_name].one += metrics[metric_name].one
acc_metrics[metric_name].two += metrics[metric_name].two
def _request_next_action_and_time_it(game_view: GameState,
game_points: PlayerPair[int],
player: Player):
start_perf = time.perf_counter()
start_process = time.process_time()
action = player.request_next_action(game_view, game_points)
end_perf = time.perf_counter()
end_process = time.process_time()
return action, end_perf - start_perf, end_process - start_process
def evaluate_player_pair_in_process(num_bummerls: int,
players: PlayerPair[str]) -> MetricsDict:
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
players = PlayerPair(PLAYER_NAMES[players.one](PlayerId.ONE),
PLAYER_NAMES[players.two](PlayerId.TWO))
# Initialize the metrics.
bummerls = PlayerPair(0, 0)
game_points = PlayerPair(0, 0)
games = PlayerPair(0, 0)
trick_points = PlayerPair(0, 0)
bummerls_of_interest = PlayerPair(0, 0)
games_of_interest = PlayerPair(0, 0)
perf_counter_sum = PlayerPair(0, 0)
process_time_sum = PlayerPair(0, 0)
num_actions_requested = PlayerPair(0, 0)
random_seed_generator = random.Random()
# Simulate the games and update the metrics accordingly.
for i in range(num_bummerls):
print(f"\rSimulating bummerl {i} out of {num_bummerls} ({bummerls})...",
end="")
bummerl = Bummerl()
is_bummerl_of_interest = False
while not bummerl.is_over:
bummerl.start_game(seed=random_seed_generator.random())
players.one.game_of_interest = False
players.two.game_of_interest = False
game = bummerl.game
while not game.game_state.is_game_over:
player_id = game.game_state.next_player
if players[player_id].cheater:
game_view = game.game_state
else:
game_view = game.game_state.next_player_view()
action, perf_counter, process_time = _request_next_action_and_time_it(
game_view, bummerl.game_points, players[player_id])
game.play_action(action)
perf_counter_sum[player_id] += perf_counter
process_time_sum[player_id] += process_time
num_actions_requested[player_id] += 1
is_game_of_interest = \
players.one.game_of_interest or players.two.game_of_interest
if is_game_of_interest:
is_bummerl_of_interest = True
_accumulate_player_pair(trick_points, game.game_state.trick_points)
last_game_points = game.game_state.game_points
if last_game_points.one > 0:
games.one += 1
if is_game_of_interest:
games_of_interest.one += 1
else:
games.two += 1
if is_game_of_interest:
games_of_interest.two += 1
_accumulate_player_pair(game_points, last_game_points)
bummerl.finalize_game()
if bummerl.game_points.one > 6:
bummerls.one += 1
if is_bummerl_of_interest:
bummerls_of_interest.one += 1
else:
bummerls.two += 1
if is_bummerl_of_interest:
bummerls_of_interest.two += 1
print(end="\r")
return {"bummerls": bummerls, "games": games, "game_points": game_points,
"trick_points": trick_points,
"bummerls_of_interest": bummerls_of_interest,
"games_of_interest": games_of_interest,
"perf_counter_sum": perf_counter_sum,
"process_time_sum": process_time_sum,
"num_actions_requested": num_actions_requested}
def evaluate_player_pair_in_parallel(players: PlayerPair[str],
num_bummerls: int = 1000,
num_processes: int = 4) -> MetricsDict:
num_bummerls_per_process = num_bummerls // num_processes
num_bummerls_to_run = [num_bummerls_per_process] * num_processes
for i in range(num_bummerls % num_processes):
num_bummerls_to_run[i] += 1
print(f"Number of bummerls for each worker: {num_bummerls_to_run}")
with multiprocessing.Pool(processes=num_processes) as pool:
metrics_dicts = pool.map(
functools.partial(evaluate_player_pair_in_process, players=players),
num_bummerls_to_run)
merged_metrics = metrics_dicts[0]
for metric_dict in metrics_dicts[1:]:
accumulate_metrics(merged_metrics, metric_dict)
return merged_metrics
def evaluate_one_player_vs_opponent_list(player: str,
opponents: List[str]) -> DataFrame:
rows = []
for opponent in opponents:
print(f"Simulating {player} vs {opponent}")
players = PlayerPair(player, opponent)
metrics = evaluate_player_pair_in_parallel(players)
_print_metrics(metrics)
rows.append([player, opponent] + _get_results_row(metrics))
columns = ["player_one", "player_two"] + _get_metrics_column_names()
return pandas.DataFrame(rows, columns=columns)
def evaluate_all_player_pairs(player_names: List[str] = None) -> DataFrame:
dataframes = []
player_names = player_names or list(PLAYER_NAMES)
for i, player_to_evaluate in enumerate(player_names):
dataframes.append(evaluate_one_player_vs_opponent_list(
player_to_evaluate, player_names[:i + 1]))
return pandas.concat(dataframes)
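# Example usage (sketch; "random" and "heuristic" are hypothetical names -- the
# real keys live in ai.eval.players.PLAYER_NAMES):
#   df = evaluate_all_player_pairs(["random", "heuristic"])
#   print(df[["player_one", "player_two", "bummerls_one", "bummerls_two"]])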
def main():
dataframe = evaluate_all_player_pairs()
# noinspection PyTypeChecker
dataframe.to_csv(os.path.join(os.path.dirname(__file__), "eval_results.csv"),
index=False)
if __name__ == "__main__":
main_wrapper(main)
|
the-stack_0_18696 | """Loading toy datasets.
Refer to notebook: `geomstats/notebooks/01_data_on_manifolds.ipynb`
to visualize these datasets.
"""
import csv
import json
import os
import pandas as pd
import geomstats.backend as gs
from geomstats.datasets.prepare_graph_data import Graph
from geomstats.geometry.hypersphere import Hypersphere
from geomstats.geometry.skew_symmetric_matrices import SkewSymmetricMatrices
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
MODULE_PATH = os.path.dirname(__file__)
DATA_PATH = os.path.join(MODULE_PATH, 'data')
CITIES_PATH = os.path.join(DATA_PATH, 'cities', 'cities.json')
CONNECTOMES_PATH = os.path.join(DATA_PATH, 'connectomes/train_FNC.csv')
CONNECTOMES_LABELS_PATH = os.path.join(
DATA_PATH, 'connectomes/train_labels.csv')
POSES_PATH = os.path.join(DATA_PATH, 'poses', 'poses.json')
KARATE_PATH = os.path.join(DATA_PATH, 'graph_karate', 'karate.txt')
KARATE_LABELS_PATH = os.path.join(
DATA_PATH, 'graph_karate', 'karate_labels.txt')
GRAPH_RANDOM_PATH = os.path.join(DATA_PATH, 'graph_random', 'graph_random.txt')
GRAPH_RANDOM_LABELS_PATH = os.path.join(
DATA_PATH, 'graph_random', 'graph_random_labels.txt'
)
LEAVES_PATH = os.path.join(DATA_PATH, 'leaves', 'leaves.csv')
EMG_PATH = os.path.join(DATA_PATH, 'emg', 'emg.csv')
OPTICAL_NERVES_PATH = os.path.join(
DATA_PATH, 'optical_nerves', 'optical_nerves.txt')
HANDS_PATH = os.path.join(DATA_PATH, 'hands', 'hands.txt')
HANDS_LABELS_PATH = os.path.join(DATA_PATH, 'hands', 'labels.txt')
CELLS_PATH = os.path.join(DATA_PATH, 'cells', 'cells.txt')
CELL_LINES_PATH = os.path.join(DATA_PATH, 'cells', 'cell_lines.txt')
CELL_TREATMENTS_PATH = os.path.join(DATA_PATH, 'cells', 'treatments.txt')
def load_cities():
"""Load data from data/cities/cities.json.
Returns
-------
data : array-like, shape=[50, 2]
Array with each row representing one sample,
i. e. latitude and longitude of a city.
Angles are in radians.
name : list
List of city names.
"""
with open(CITIES_PATH, encoding='utf-8') as json_file:
data_file = json.load(json_file)
names = [row['city'] for row in data_file]
data = list(
map(
lambda row: [
row[col_name] / 180 * gs.pi for col_name in ['lat', 'lng']
],
data_file,
)
)
data = gs.array(data)
colat = gs.pi / 2 - data[:, 0]
colat = gs.expand_dims(colat, axis=1)
lng = gs.expand_dims(data[:, 1] + gs.pi, axis=1)
data = gs.concatenate([colat, lng], axis=1)
sphere = Hypersphere(dim=2)
data = sphere.spherical_to_extrinsic(data)
return data, names
def load_random_graph():
"""Load data from data/graph_random.
Returns
-------
graph: prepare_graph_data.Graph
Graph containing nodes, edges, and labels from the random dataset.
"""
return Graph(GRAPH_RANDOM_PATH, GRAPH_RANDOM_LABELS_PATH)
def load_karate_graph():
"""Load data from data/graph_karate.
Returns
-------
graph: prepare_graph_data.Graph
Graph containing nodes, edges, and labels from the karate dataset.
"""
return Graph(KARATE_PATH, KARATE_LABELS_PATH)
def load_poses(only_rotations=True):
"""Load data from data/poses/poses.csv.
Returns
-------
data : array-like, shape=[5, 3] or shape=[5, 6]
Array with each row representing one sample,
i. e. one 3D rotation or one 3D rotation + 3D translation.
img_paths : list
List of img paths.
"""
data = []
img_paths = []
so3 = SpecialOrthogonal(n=3, point_type='vector')
with open(POSES_PATH) as json_file:
data_file = json.load(json_file)
for row in data_file:
pose_mat = gs.array(row['rot_mat'])
pose_vec = so3.rotation_vector_from_matrix(pose_mat)
if not only_rotations:
trans_vec = gs.array(row['trans_mat'])
pose_vec = gs.concatenate([pose_vec, trans_vec], axis=-1)
data.append(pose_vec)
img_paths.append(row['img'])
data = gs.array(data)
return data, img_paths
def load_connectomes(as_vectors=False):
"""Load data from brain connectomes.
Load the correlation data from the kaggle MSLP 2014 Schizophrenia
Challenge. The original data came as flattened vectors, but if `raw=True`
is passed, the correlation values are reshaped as symmetric matrices with
ones on the diagonal.
Parameters
----------
as_vectors : bool
Whether to return raw data as vectors or as symmetric matrices.
Optional, default: False
Returns
-------
    mat : array-like, shape=[86, {[28, 28], 378}]
Connectomes.
patient_id : array-like, shape=[86,]
Patient unique identifiers
target : array-like, shape=[86,]
Labels, whether patients belong to the diseased class (1) or control
(0).
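    Notes
    -----
    Each raw vector has 378 entries, one correlation per pair of the 28 brain
    regions (28 * 27 / 2 = 378). When ``as_vectors`` is False these are
    reshaped into symmetric 28 x 28 matrices with ones on the diagonal.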
"""
with open(CONNECTOMES_PATH) as csvfile:
data_list = list(csv.reader(csvfile))
patient_id = gs.array([int(row[0]) for row in data_list[1:]])
data = gs.array(
[[float(value) for value in row[1:]] for row in data_list[1:]])
with open(CONNECTOMES_LABELS_PATH) as csvfile:
labels = list(csv.reader(csvfile))
target = gs.array([int(row[1]) for row in labels[1:]])
if as_vectors:
return data, patient_id, target
mat = SkewSymmetricMatrices(28).matrix_representation(data)
mat = gs.eye(28) - gs.transpose(gs.tril(mat), (0, 2, 1))
mat = 1.0 / 2.0 * (mat + gs.transpose(mat, (0, 2, 1)))
return mat, patient_id, target
def load_leaves():
"""Load data from data/leaves/leaves.xlsx.
Returns
-------
beta_param : array-like, shape=[172, 2]
Beta parameters of the beta distributions fitted to each
leaf orientation angle sample of 172 species of plants.
distrib_type: array-like, shape=[172, ]
Leaf orientation angle distribution type for each of the 172 species.
"""
data = pd.read_csv(LEAVES_PATH, sep=';')
beta_param = gs.array(data[['nu', 'mu']])
distrib_type = gs.squeeze(gs.array(data['Distribution']))
return beta_param, distrib_type
def load_emg():
"""Load data from data/emg/emg.csv.
Returns
-------
data_emg : pandas.DataFrame, shape=[731682, 10]
Emg time serie for each of the 8 electrodes, with the time stamps
and the label of the hand sign.
"""
data_emg = pd.read_csv(EMG_PATH)
return data_emg
def load_optical_nerves():
"""Load data from data/optical_nerves/optical_nerves.txt.
Load the dataset of sets of 5 landmarks, labelled S, T, I, N, V, in 3D
on monkeys' optical nerve heads:
- 1st landmark (S): superior aspect of the retina,
- 2nd landmark (T): side of the retina closest to the temporal
bone of the skull,
- 3rd landmark (N): nose side of the retina,
- 4th landmark (I): inferior point,
- 5th landmarks (V): optical nerve head deepest point.
For each monkey, an experimental glaucoma was introduced in one eye,
while the second eye was kept as control. This dataset can be used to
investigate a significant difference between the glaucoma and the
control eyes.
Label 0 refers to a normal eye, and Label 1 to an eye with glaucoma.
References
----------
.. [PE2015] V. Patrangenaru and L. Ellingson. Nonparametric Statistics
on Manifolds and Their Applications to Object Data, 2015.
https://doi.org/10.1201/b18969
Returns
-------
data : array-like, shape=[22, 5, 3]
Data representing the 5 landmarks, in 3D, for 11 different monkeys.
labels : array-like, shape=[22,]
Labels in {0, 1} classifying the corresponding optical nerve as
normal (label = 0) or glaucoma (label = 1).
monkeys : array-like, shape=[22,]
Indices in 0...10 referencing the index of the monkey to which a given
optical nerve belongs.
"""
nerves = pd.read_csv(OPTICAL_NERVES_PATH, sep='\t')
nerves = nerves.set_index('Filename')
nerves = nerves.drop(index=['laljn103.12b', 'lalj0103.12b'])
nerves = nerves.reset_index(drop=True)
nerves_gs = gs.array(nerves.values)
data = gs.reshape(nerves_gs, (nerves_gs.shape[0], -1, 3))
labels = gs.tile([0, 1], [nerves_gs.shape[0] // 2])
monkeys = gs.repeat(gs.arange(11), 2)
return data, labels, monkeys
def load_hands():
"""Load data from data/hands/hands.txt and labels.txt.
Load the dataset of hand poses, where a hand is represented as a
set of 22 landmarks - the hands joints - in 3D.
The hand poses represent two different hand poses:
- Label 0: hand is in the position "Grab"
- Label 1: hand is in the position "Expand"
This is a subset of the SHREC 2017 dataset [SWVGLF2017].
References
----------
.. [SWVGLF2017] Q. De Smedt, H. Wannous, J.P. Vandeborre,
J. Guerry, B. Le Saux, D. Filliat, SHREC'17 Track: 3D Hand Gesture
Recognition Using a Depth and Skeletal Dataset, 10th Eurographics
Workshop on 3D Object Retrieval, 2017.
https://doi.org/10.2312/3dor.20171049
Returns
-------
data : array-like, shape=[52, 22, 3]
Hand data, represented as a list of 22 joints, specifically as
the 3D coordinates of these joints.
labels : array-like, shape=[52,]
Label representing hands poses. Label 0: "Grab", Label 1: "Expand"
bone_list : array-like
List of bones, as a list of connexions between joints.
"""
data = gs.array(pd.read_csv(HANDS_PATH, sep=' ').values)
n_landmarks = 22
dim = 3
data = gs.reshape(data, (data.shape[0], n_landmarks, dim))
labels = gs.array(pd.read_csv(HANDS_LABELS_PATH).values.squeeze())
bone_list = gs.array(
[
[0, 1],
[0, 2],
[2, 3],
[3, 4],
[4, 5],
[1, 6],
[6, 7],
[7, 8],
[8, 9],
[1, 10],
[10, 11],
[11, 12],
[12, 13],
[1, 14],
[14, 15],
[15, 16],
[16, 17],
[1, 18],
[18, 19],
[19, 20],
[20, 21],
]
)
return data, labels, bone_list
def load_cells():
"""Load data from data/cells/cells.txt.
Returns
-------
cells : list of 367 discrete 2D curves
Each curve represents the boundary of a cell, their lengths are
not necessarily equal.
cell_lines : array of 367 strings
List of the cell lines of each cell.
treatments : array of 367 strings
List of the treatments given to each cell.
"""
with open(CELLS_PATH) as cells_file:
cells = cells_file.read().split('\n\n')
for i, cell in enumerate(cells):
cell = cell.split('\n')
curve = []
for point in cell:
coords = [int(coord) for coord in point.split()]
curve.append(coords)
cells[i] = gs.array(curve)
with open(CELL_LINES_PATH) as cell_lines_file:
cell_lines = gs.array(cell_lines_file.read().split('\n'))
with open(CELL_TREATMENTS_PATH) as treatments_file:
treatments = gs.array(treatments_file.read().split('\n'))
return cells, cell_lines, treatments
|
the-stack_0_18697 | from pynput.keyboard import Key, Listener
import logging
def main():
logging.basicConfig(filename='logged.txt', level=logging.DEBUG, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
def on_press(Key):
logging.info(str(Key))
with Listener(on_press=on_press) as listener:
listener.join()
outputLogs()
def outputLogs():
with open('logged.txt', 'ab+') as lfIN:
for log in lfIN:
print(log.split(':'))
if __name__ == '__main__':
main()
|
the-stack_0_18700 | from PyQt5.QtWidgets import QTextEdit, QWidget
from PyQt5.QtCore import Qt
__textheight__ = 25
class TextBox(QTextEdit):
"""
Basic textbox widget with fixed height and placeholder text
"""
def __init__(self, placeHolderText: str, textHeight: int = __textheight__, parent: QWidget = None):
super().__init__(parent)
self.setFixedHeight(textHeight)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setPlaceholderText(placeHolderText) |
the-stack_0_18702 | #@+leo-ver=5-thin
#@+node:tbrown.20100318101414.5990: * @file viewrendered.py
#@+<< vr docstring >>
#@+node:tbrown.20100318101414.5991: ** << vr docstring >>
'''
Creates a window for *live* rendering of reSTructuredText, markdown text,
images, movies, sounds, rst, html, jupyter notebooks, etc.
Dependencies
============
This plugin uses docutils, http://docutils.sourceforge.net/, to render reStructuredText,
so installing docutils is highly recommended when using this plugin.
This plugin uses markdown, http://http://pypi.python.org/pypi/Markdown, to render Markdown,
so installing markdown is highly recommended when using this plugin.
Commands
========
viewrendered.py creates the following (``Alt-X``) commands:
``viewrendered (abbreviated vr)``
Opens a new rendering window.
By default, the rendering pane renders body text as reStructuredText,
with all Leo directives removed.
However, if the body text starts with ``<`` (after removing directives),
the body text is rendered as html.
**Important**: The default rendering just described does not apply to nodes
whose headlines begin with @image, @html, @movie, @networkx, @svg and @url.
See the section called **Special Renderings** below.
Rendering sets the process current directory (os.chdir()) to the path
to the node being rendered, to allow relative paths to work in ``.. image::`` directives.
.. ``viewrendered-big``
.. as above, but zoomed in, useful for presentations
.. ``viewrendered-html``
.. displays the html source generated from reStructuredText, useful for
.. debugging
``vr-hide``
Makes the rendering pane invisible, but does not destroy it.
``vr-lock`` and ``vr-unlock``
Locks and unlocks the rendering pane.
When unlocked (the initial state), the rendering pane renders the contents
of the presently selected node.
When locked, the rendering pane does not change when other nodes are selected.
This is useful for playing movies in the rendering pane.
``vr-pause-play-movie``
This command has effect only if the rendering pane is presently showing a movie.
It pauses the movie if playing, or resumes the movie if paused.
``vr-show``
Makes the rendering pane visible.
``vr-toggle``
Shows the rendering pane if invisible, otherwise hides it.
``vr-update``
Forces an update of the rendering pane.
This is especially useful for @graphics-script nodes:
    such nodes are updated automatically only when selected,
not when the body text changes.
Rendering reStructuredText
==========================
For example, both::
Heading
-------
`This` is **really** a line of text.
and::
<h1>Heading<h1>
<tt>This</tt> is <b>really</b> a line of text.
will look something like:
**Heading**
`This` is **really** a line of text.
**Important**: reStructuredText errors and warnings will appear in red in the rendering pane.
Rendering markdown
==================
Please see the markdown syntax document at http://daringfireball.net/projects/markdown/syntax
for more information on markdown.
Unless ``@string view-rendered-default-kind`` is set to ``md``, markdown rendering must be
specified by putting it in a ``@md`` node.
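For example (a minimal sketch), an ``@md`` node whose body is::

    # Heading

    *This* is **really** a line of text.

renders the same heading and emphasized line as the example above, but via markdown.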
Special Renderings
===================
As stated above, the rendering pane renders body text as reStructuredText
by default, with all Leo directives removed. However, if the body text
starts with ``<`` (after removing directives), the body text is rendered as
html.
This plugin renders @md, @image, @jupyter, @html, @movie, @networkx and @svg nodes as follows:
**Note**: For @image, @movie and @svg nodes, either the headline or the first line of body text may
contain a filename. If relative, the filename is resolved relative to Leo's load directory.
- ``@md`` renders the body text as markdown, as described above.
- ``@graphics-script`` executes the script in the body text in a context containing
two predefined variables:
- gs is the QGraphicsScene for the rendering pane.
- gv is the QGraphicsView for the rendering pane.
  Using these variables, the script in the body text may add graphics to the rendering pane.
- ``@image`` renders the file as an image.
- ``@html`` renders the body text as html.
- ``@jupyter`` renders the output from Jupyter Notebooks.
The contents of the @jupyter node can be either a url to the notebook or
the actual JSON notebook itself.
Use file:// urls for local files. Some examples:
Windows: file:///c:/Test/a_notebook.ipynb
Linux: file:///home/a_notebook.ipynb
- ``@movie`` plays the file as a movie. @movie also works for music files.
- ``@networkx`` is non-functional at present. It is intended to
render the body text as a networkx graph.
See http://networkx.lanl.gov/
- ``@svg`` renders the file as a (possibly animated!) svg (Scalable Vector Graphics) image.
See http://en.wikipedia.org/wiki/Scalable_Vector_Graphics
**Note**: if the first character of the body text is ``<`` after removing Leo directives,
the contents of body pane is taken to be an svg image.
Settings
========
- ``@color rendering-pane-background-color = white``
  The background color of the rendering pane when rendering text.
- ``@bool view-rendered-auto-create = False``
When True, show the rendering pane when Leo opens an outline.
- ``@bool view-rendered-auto-hide = False``
When True, hide the rendering pane for text-only renderings.
- ``@string view-rendered-default-kind = rst``
The default kind of rendering. One of (big,rst,md,html)
- ``@string view-rendered-md-extensions = extra``
A comma-delineated list of markdown extensions to use.
Suitable extensions can be seen here:
http://pythonhosted.org/Markdown/extensions/index.html
Acknowledgments
================
Terry Brown created this initial version of this plugin,
and the free_layout and NestedSplitter plugins used by viewrendered.
Edward K. Ream generalized this plugin and added communication
and coordination between the free_layout, NestedSplitter and viewrendered plugins.
Jacob Peck added markdown support to this plugin.
'''
#@-<< vr docstring >>
#@+<< to do >>
#@+node:ekr.20140924060835.19485: ** << to do >> (vr)
#@+at
# To do:
#
# - Use the free_layout rotate-all command in Leo's toggle-split-direction command.
# - Add dict to allow customize must_update.
# - Lock movies automatically until they are finished?
# - Render @url nodes as html?
# - Support uA's that indicate the kind of rendering desired.
# - (Failed) Make viewrendered-big work.
#@-<< to do >>
#pylint: disable=no-member
trace = False
# This global trace is convenient.
#@+<< imports >>
#@+node:tbrown.20100318101414.5993: ** << imports >> (vr)
import leo.core.leoGlobals as g
try:
import leo.plugins.qt_text as qt_text
import leo.plugins.free_layout as free_layout
from leo.core.leoQt import isQt5, QtCore, QtGui, QtWidgets
from leo.core.leoQt import phonon, QtMultimedia, QtSvg, QtWebKitWidgets
except Exception:
QtWidgets = False
try:
import docutils
import docutils.core
except ImportError:
docutils = None
if docutils:
try:
from docutils.core import publish_string
from docutils.utils import SystemMessage
got_docutils = True
except ImportError:
got_docutils = False
g.es_exception()
except SyntaxError:
got_docutils = False
g.es_exception()
else:
got_docutils = False
# markdown support, non-vital
try:
from markdown import markdown
got_markdown = True
except ImportError:
got_markdown = False
import os
# nbformat (@jupyter) support, non-vital.
try:
import nbformat
from nbconvert import HTMLExporter
# from traitlets.config import Config
except ImportError:
nbformat = None
import json
try:
from urllib.request import urlopen
except ImportError:
try:
from urllib import urlopen # for Python 2.7
except ImportError:
urllib = None
#@-<< imports >>
#@+<< set BaseTextWidget >>
#@+node:ekr.20190424081947.1: ** << set BaseTextWidget >> (vr)
if QtWidgets:
try:
BaseTextWidget = QtWebKitWidgets.QWebView
except Exception:
BaseTextWidget = QtWidgets.QTextBrowser
else:
BaseTextWidget = None
#@-<< set BaseTextWidget >>
#@+<< define html templates >>
#@+node:ekr.20170324090828.1: ** << define html templates >> (vr)
image_template = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head></head>
<body bgcolor="#fffbdc">
<img src="%s">
</body>
</html>
'''
# http://docs.mathjax.org/en/latest/start.html
latex_template = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<script src='https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'>
</script>
</head>
<body bgcolor="#fffbdc">
%s
</body>
</html>
'''
#@-<< define html templates >>
controllers = {}
# Keys are c.hash(): values are PluginControllers (QWidget's).
layouts = {}
# Keys are c.hash(): values are tuples (layout_when_closed, layout_when_open)
#@+others
#@+node:ekr.20110320120020.14491: ** vr.Top-level
#@+node:tbrown.20100318101414.5994: *3* vr.decorate_window
def decorate_window(w):
# Do not override the style sheet!
# This interferes with themes
# w.setStyleSheet(stickynote_stylesheet)
g.app.gui.attachLeoIcon(w)
w.resize(600, 300)
#@+node:tbrown.20100318101414.5995: *3* vr.init
def init():
'''Return True if the plugin has loaded successfully.'''
global got_docutils
if g.app.gui.guiName() != 'qt':
return False
# #1248.
# if g.app.gui.guiName()
if not QtWidgets or not g.app.gui.guiName().startswith('qt'):
if (
not g.unitTesting and
not g.app.batchMode and
not g.app.gui.guiName() in ('browser', 'curses')
):
g.es_print('viewrendered requires Qt')
return False
if not got_docutils:
g.es_print('Warning: viewrendered.py running without docutils.')
# Always enable this plugin, even if imports fail.
g.plugin_signon(__name__)
g.registerHandler('after-create-leo-frame', onCreate)
g.registerHandler('close-frame', onClose)
g.registerHandler('scrolledMessage', show_scrolled_message)
return True
#@+node:ekr.20180825025924.1: *3* vr.isVisible
def isVisible():
'''Return True if the VR pane is visible.'''
#@+node:ekr.20110317024548.14376: *3* vr.onCreate
def onCreate(tag, keys):
c = keys.get('c')
if not c:
return
provider = ViewRenderedProvider(c)
free_layout.register_provider(c, provider)
if g.app.dock:
# Instantiate immediately.
viewrendered(event={'c': c})
#@+node:vitalije.20170712174157.1: *3* vr.onClose
def onClose(tag, keys):
c = keys.get('c')
h = c.hash()
vr = controllers.get(h)
if vr:
c.bodyWantsFocus()
del controllers[h]
vr.deactivate()
vr.deleteLater()
#@+node:tbrown.20110629132207.8984: *3* vr.show_scrolled_message
def show_scrolled_message(tag, kw):
if g.unitTesting:
return None # This just slows the unit tests.
c = kw.get('c')
flags = kw.get('flags') or 'rst'
vr = viewrendered(event=kw)
title = kw.get('short_title', '').strip()
vr.setWindowTitle(title)
s = '\n'.join([
title,
'=' * len(title),
'',
kw.get('msg')
])
vr.update(
tag='show-scrolled-message',
keywords={'c': c, 'force': True, 's': s, 'flags': flags},
)
return True
#@+node:vitalije.20170713082256.1: *3* vr.split_last_sizes
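# Arithmetic sketch: split_last_sizes([10, 20, 30]) == [20, 40, 30, 30].
# Every size but the last is doubled and the last size is repeated, so that,
# proportionally, the earlier panes keep their share and the last pane is
# split in half to make room for the VR pane.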
def split_last_sizes(sizes):
result = [2 * x for x in sizes[:-1]]
result.append(sizes[-1])
result.append(sizes[-1])
return result
#@+node:ekr.20110320120020.14490: ** vr.Commands
#@+node:ekr.20131213163822.16471: *3* g.command('preview')
@g.command('preview')
def preview(event):
'''A synonym for the vr-toggle command.'''
toggle_rendering_pane(event)
#@+node:tbrown.20100318101414.5998: *3* g.command('vr')
@g.command('vr')
def viewrendered(event):
"""Open render view for commander"""
global controllers, layouts
if g.app.gui.guiName() != 'qt':
return None
c = event.get('c')
if not c:
return None
h = c.hash()
vr = controllers.get(h)
if not vr:
controllers[h] = vr = ViewRenderedController(c)
if g.app.dock:
vr.show_dock_or_pane()
return vr
#
# Legacy code: add the pane to the splitter.
layouts[h] = c.db.get('viewrendered_default_layouts', (None, None))
vr._ns_id = '_leo_viewrendered' # for free_layout load/save
vr.splitter = splitter = c.free_layout.get_top_splitter()
if splitter:
vr.store_layout('closed')
sizes = split_last_sizes(splitter.sizes())
ok = splitter.add_adjacent(vr, 'bodyFrame', 'right-of')
if not ok:
splitter.insert(0, vr)
elif splitter.orientation() == QtCore.Qt.Horizontal:
splitter.setSizes(sizes)
vr.adjust_layout('open')
c.bodyWantsFocusNow()
return vr
#@+node:ekr.20130413061407.10362: *3* g.command('vr-contract')
@g.command('vr-contract')
def contract_rendering_pane(event):
'''Contract the rendering pane.'''
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
return
vr.contract()
#@+node:ekr.20130413061407.10361: *3* g.command('vr-expand')
@g.command('vr-expand')
def expand_rendering_pane(event):
'''Expand the rendering pane.'''
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
return
vr.expand()
#@+node:ekr.20110917103917.3639: *3* g.command('vr-hide')
@g.command('vr-hide')
def hide_rendering_pane(event):
'''Close the rendering pane.'''
global controllers, layouts
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
if vr.external_dock:
return # Can't hide a top-level dock.
dock = vr.leo_dock
if dock:
dock.hide()
return
#
# Legacy code.
if vr.pyplot_active:
g.es_print('can not close VR pane after using pyplot')
return
vr.store_layout('open')
vr.deactivate()
vr.deleteLater()
def at_idle(c=c, _vr=vr):
_vr.adjust_layout('closed')
c.bodyWantsFocusNow()
QtCore.QTimer.singleShot(0, at_idle)
h = c.hash()
c.bodyWantsFocus()
if vr == controllers.get(h):
del controllers[h]
else:
g.trace('Can not happen: no controller for %s' % (c))
# Compatibility
close_rendering_pane = hide_rendering_pane
#@+node:ekr.20110321072702.14507: *3* g.command('vr-lock')
@g.command('vr-lock')
def lock_rendering_pane(event):
'''Lock the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if not vr.locked:
vr.lock()
#@+node:ekr.20110320233639.5777: *3* g.command('vr-pause-play')
@g.command('vr-pause-play-movie')
def pause_play_movie(event):
'''Pause or play a movie in the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vp = vr.vp
if not vp:
return
f = vp.pause if vp.isPlaying() else vp.play
f()
#@+node:ekr.20110317080650.14386: *3* g.command('vr-show')
@g.command('vr-show')
def show_rendering_pane(event):
'''Show the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vr.show_dock_or_pane()
#@+node:ekr.20131001100335.16606: *3* g.command('vr-toggle')
@g.command('vr-toggle')
def toggle_rendering_pane(event):
'''Toggle the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
if g.app.gui.guiName() != 'qt':
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vr.hide() # So the toggle below will work.
if g.app.dock:
if vr.external_dock:
return # Can't hide a top-level dock.
dock = vr.leo_dock
if dock:
f = dock.show if dock.isHidden() else dock.hide
f()
elif vr.isHidden():
show_rendering_pane(event)
else:
hide_rendering_pane(event)
#@+node:ekr.20130412180825.10345: *3* g.command('vr-unlock')
@g.command('vr-unlock')
def unlock_rendering_pane(event):
    '''Unlock the rendering pane.'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if vr.locked:
vr.unlock()
#@+node:ekr.20110321151523.14464: *3* g.command('vr-update')
@g.command('vr-update')
def update_rendering_pane(event):
'''Update the rendering pane'''
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
vr.update(tag='view', keywords={'c': c, 'force': True})
#@+node:vitalije.20170712195827.1: *3* @g.command('vr-zoom')
@g.command('vr-zoom')
def zoom_rendering_pane(event):
global controllers
if g.app.gui.guiName() != 'qt':
return
c = event.get('c')
if not c:
return
vr = controllers.get(c.hash())
if not vr:
vr = viewrendered(event)
if g.app.dock:
return
flc = c.free_layout
if vr.zoomed:
for ns in flc.get_top_splitter().top().self_and_descendants():
if hasattr(ns, '_unzoom'):
# this splitter could have been added since
ns.setSizes(ns._unzoom)
else:
parents = []
parent = vr
while parent:
parents.append(parent)
parent = parent.parent()
for ns in flc.get_top_splitter().top().self_and_descendants():
# FIXME - shouldn't be doing this across windows
ns._unzoom = ns.sizes()
for i in range(ns.count()):
w = ns.widget(i)
if w in parents:
sizes = [0] * len(ns._unzoom)
sizes[i] = sum(ns._unzoom)
ns.setSizes(sizes)
break
vr.zoomed = not vr.zoomed
#@+node:tbrown.20110629084915.35149: ** class ViewRenderedProvider (vr)
class ViewRenderedProvider:
#@+others
#@+node:tbrown.20110629084915.35154: *3* vr.__init__
def __init__(self, c):
self.c = c
# Careful: we may be unit testing.
if hasattr(c, 'free_layout'):
splitter = c.free_layout.get_top_splitter()
if splitter:
splitter.register_provider(self)
#@+node:tbrown.20110629084915.35150: *3* vr.ns_provides
def ns_provides(self):
return [('Viewrendered', '_leo_viewrendered')]
#@+node:tbrown.20110629084915.35151: *3* vr.ns_provide
def ns_provide(self, id_):
global controllers, layouts
if id_ == '_leo_viewrendered':
c = self.c
vr = controllers.get(c.hash()) or ViewRenderedController(c)
h = c.hash()
controllers[h] = vr
if not layouts.get(h):
layouts[h] = c.db.get('viewrendered_default_layouts', (None, None))
# return ViewRenderedController(self.c)
return vr
return None
#@-others
#@+node:ekr.20110317024548.14375: ** class ViewRenderedController (QWidget)
if QtWidgets: # NOQA
class ViewRenderedController(QtWidgets.QWidget):
'''A class to control rendering in a rendering pane.'''
#@+others
#@+node:ekr.20110317080650.14380: *3* vr.ctor & helpers
def __init__(self, c, parent=None):
'''Ctor for ViewRenderedController class.'''
self.c = c
# Create the widget.
super().__init__(parent)
self.create_pane(parent)
# Set the ivars.
self.active = False
self.badColors = []
self.delete_callback = None
self.gnx = None
self.graphics_class = QtWidgets.QGraphicsWidget
self.pyplot_canvas = None
self.pyplot_imported = False
self.gs = None # For @graphics-script: a QGraphicsScene
self.gv = None # For @graphics-script: a QGraphicsView
self.inited = False
self.length = 0 # The length of previous p.b.
self.locked = False
self.pyplot_active = False
self.scrollbar_pos_dict = {} # Keys are vnodes, values are positions.
self.sizes = [] # Saved splitter sizes.
self.splitter = None
self.splitter_index = None # The index of the rendering pane in the splitter.
self.title = None
self.vp = None # The present video player.
self.w = None # The present widget in the rendering pane.
# User settings.
self.reloadSettings()
self.node_changed = True
# Init.
self.create_dispatch_dict()
self.activate()
self.zoomed = False
#@+node:ekr.20110320120020.14478: *4* vr.create_dispatch_dict
def create_dispatch_dict(self):
pc = self
d = {
'big': pc.update_rst,
'html': pc.update_html,
'graphics-script': pc.update_graphics_script,
'image': pc.update_image,
'jupyter': pc.update_jupyter,
'latex': pc.update_latex,
'markdown': pc.update_md,
'md': pc.update_md,
'movie': pc.update_movie,
'networkx': pc.update_networkx,
'pyplot': pc.update_pyplot,
'rest': pc.update_rst,
'rst': pc.update_rst,
'svg': pc.update_svg,
# 'url': pc.update_url,
# 'xml': pc.update_xml,
}
pc.dispatch_dict = d
return d
#@+node:ekr.20171114150510.1: *4* vr.reloadSettings
def reloadSettings(self):
c = self.c
c.registerReloadSettings(self)
self.auto_create = c.config.getBool('view-rendered-auto-create', False)
self.background_color = c.config.getColor('rendering-pane-background-color') or 'white'
self.default_kind = c.config.getString('view-rendered-default-kind') or 'rst'
self.external_dock = c.config.getBool('use-vr-dock', default=False)
#@+node:ekr.20190614065659.1: *4* vr.create_pane
def create_pane(self, parent):
'''Create the VR pane or dock.'''
c = self.c
dw = c.frame.top
self.leo_dock = None # May be set below.
if g.app.unitTesting:
return
#
# Create the inner contents.
self.setObjectName('viewrendered_pane')
self.setLayout(QtWidgets.QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
if not g.app.dock:
return
external_dock = c.config.getBool('use-vr-dock', default=False)
# reload_settings has not yet been called.
#
# Allow the VR dock to move only in special circumstances.
moveable = (
external_dock and g.app.init_docks or
g.app.get_central_widget(c) == 'body'
)
self.leo_dock = dock = dw.createDockWidget(
closeable=True, moveable=moveable, height=50, name='Render')
if moveable:
#
# Create a stand-alone dockable area.
dock.setWidget(self)
dw.addDockWidget(QtCore.Qt.RightDockWidgetArea, dock)
dock.show()
return
#
# Split the body dock.
dw.leo_docks.append(dock)
dock.setWidget(self)
dw.splitDockWidget(dw.body_dock, dock, QtCore.Qt.Horizontal)
#@+node:tbrown.20110621120042.22676: *3* vr.closeEvent
def closeEvent(self, event):
'''Close the vr window.'''
self.deactivate()
#@+node:ekr.20130413061407.10363: *3* vr.contract & expand
def contract(self):
self.change_size(-100)
def expand(self):
self.change_size(100)
def change_size(self, delta):
if hasattr(self.c, 'free_layout'):
splitter = self.parent()
i = splitter.indexOf(self)
assert i > -1
sizes = splitter.sizes()
n = len(sizes)
for j, size in enumerate(sizes):
if j == i:
sizes[j] = max(0, size + delta)
else:
sizes[j] = max(0, size - int(delta / (n - 1)))
splitter.setSizes(sizes)
#@+node:ekr.20110317080650.14381: *3* vr.activate
def activate(self):
'''Activate the vr-window.'''
pc = self
if pc.active: return
pc.inited = True
pc.active = True
g.registerHandler('select2', pc.update)
g.registerHandler('idle', pc.update)
#@+node:ekr.20110317080650.14382: *3* vr.deactivate
def deactivate(self):
'''Deactivate the vr window.'''
pc = self
# Never disable the idle-time hook: other plugins may need it.
g.unregisterHandler('select2', pc.update)
g.unregisterHandler('idle', pc.update)
pc.active = False
#@+node:ekr.20110321072702.14508: *3* vr.lock/unlock
def lock(self):
'''Lock the vr pane.'''
g.note('rendering pane locked')
self.locked = True
def unlock(self):
'''Unlock the vr pane.'''
g.note('rendering pane unlocked')
self.locked = False
#@+node:ekr.20160921071239.1: *3* vr.set_html
def set_html(self, s, w):
'''Set text in w to s, preserving scroll position.'''
pc = self
p = pc.c.p
sb = w.verticalScrollBar()
if sb:
d = pc.scrollbar_pos_dict
if pc.node_changed:
# Set the scrollbar.
pos = d.get(p.v, sb.sliderPosition())
sb.setSliderPosition(pos)
else:
# Save the scrollbars
d[p.v] = pos = sb.sliderPosition()
# if trace: g.trace('\n'+s)
w.setHtml(s)
if sb:
# Restore the scrollbars
assert pos is not None
sb.setSliderPosition(pos)
#@+node:ekr.20110319143920.14466: *3* vr.underline
def underline(self, s):
'''Generate rST underlining for s.'''
ch = '#'
n = max(4, len(g.toEncodedString(s, reportErrors=False)))
# return '%s\n%s\n%s\n\n' % (ch*n,s,ch*n)
return '%s\n%s\n\n' % (s, ch * n)
#@+node:ekr.20101112195628.5426: *3* vr.update & helpers
# Must have this signature: called by leoPlugins.callTagHandler.
def update(self, tag, keywords):
'''Update the vr pane. Called at idle time.'''
pc = self
p = pc.c.p
# #1256.
if self.locked:
return
if pc.must_update(keywords):
#
# Suppress updates until we change nodes.
pc.node_changed = pc.gnx != p.v.gnx
pc.gnx = p.v.gnx
pc.length = len(p.b) # not s
#
# Remove Leo directives.
s = keywords.get('s') if 's' in keywords else p.b
s = pc.remove_directives(s)
#
# Use plain text if we are hidden.
# This avoids annoying messages with rst.
dock = pc.leo_dock or pc
if dock.isHidden():
w = pc.ensure_text_widget()
w.setPlainText(s)
return
#
# Dispatch based on the computed kind.
kind = keywords.get('flags') if 'flags' in keywords else pc.get_kind(p)
if not kind:
# Do *not* try to render plain text.
w = pc.ensure_text_widget()
w.setPlainText(s)
pc.show() # Must be last.
return
f = pc.dispatch_dict.get(kind)
if not f:
g.trace('no handler for kind: %s' % kind)
f = pc.update_rst
f(s, keywords)
else:
# Save the scroll position.
w = pc.w
if w.__class__ == QtWidgets.QTextBrowser:
# 2011/07/30: The widget may no longer exist.
try:
sb = w.verticalScrollBar()
pc.scrollbar_pos_dict[p.v] = sb.sliderPosition()
except Exception:
g.es_exception()
pc.deactivate()
#@+node:ekr.20190424083049.1: *4* vr.create_base_text_widget
def create_base_text_widget(self):
'''Create a QWebView or a QTextBrowser.'''
c = self.c
w = BaseTextWidget()
n = c.config.getInt('qweb-view-font-size')
if n:
try:
# BaseTextWidget is a QWebView.
settings = w.settings()
settings.setFontSize(settings.DefaultFontSize, n)
except AttributeError:
# BaseTextWidget is a QTextBrowser.
pass
return w
#@+node:ekr.20110320120020.14486: *4* vr.embed_widget & helper
def embed_widget(self, w, delete_callback=None):
'''Embed widget w in the free_layout splitter.'''
pc = self; c = pc.c #X ; splitter = pc.splitter
pc.w = w
layout = self.layout()
for i in range(layout.count()):
layout.removeItem(layout.itemAt(0))
self.layout().addWidget(w)
w.show()
# Special inits for text widgets...
if w.__class__ == QtWidgets.QTextBrowser:
text_name = 'body-text-renderer'
w.setObjectName(text_name)
# Do not do this! It interferes with themes.
# pc.setBackgroundColor(pc.background_color, text_name, w)
w.setReadOnly(True)
# Create the standard Leo bindings.
wrapper_name = 'rendering-pane-wrapper'
wrapper = qt_text.QTextEditWrapper(w, wrapper_name, c)
w.leo_wrapper = wrapper
c.k.completeAllBindingsForWidget(wrapper)
w.setWordWrapMode(QtGui.QTextOption.WrapAtWordBoundaryOrAnywhere)
#@+node:ekr.20110321072702.14510: *5* vr.setBackgroundColor
def setBackgroundColor(self, colorName, name, w):
'''Set the background color of the vr pane.'''
if 0: # Do not do this! It interferes with themes.
pc = self
if not colorName: return
styleSheet = 'QTextEdit#%s { background-color: %s; }' % (name, colorName)
if QtGui.QColor(colorName).isValid():
w.setStyleSheet(styleSheet)
elif colorName not in pc.badColors:
pc.badColors.append(colorName)
g.warning('invalid body background color: %s' % (colorName))
#@+node:ekr.20110320120020.14476: *4* vr.must_update
def must_update(self, keywords):
'''Return True if we must update the rendering pane.'''
pc = self
c, p = pc.c, pc.c.p
if g.unitTesting:
return False
if keywords.get('force'):
pc.active = True
return True
if c != keywords.get('c') or not pc.active:
return False
if pc.locked:
return False
if pc.gnx != p.v.gnx:
return True
if len(p.b) != pc.length:
if pc.get_kind(p) in ('html', 'pyplot'):
pc.length = len(p.b)
return False # Only update explicitly.
return True
# This trace would be called at idle time.
# g.trace('no change')
return False
#@+node:ekr.20110321151523.14463: *4* vr.update_graphics_script
def update_graphics_script(self, s, keywords):
'''Update the graphics script in the vr pane.'''
pc = self; c = pc.c
force = keywords.get('force')
if pc.gs and not force:
return
if not pc.gs:
splitter = c.free_layout.get_top_splitter()
# Careful: we may be unit testing.
if not splitter:
g.trace('no splitter')
return
# Create the widgets.
pc.gs = QtWidgets.QGraphicsScene(splitter)
pc.gv = QtWidgets.QGraphicsView(pc.gs)
w = pc.gv.viewport() # A QWidget
# Embed the widgets.
def delete_callback():
for w in (pc.gs, pc.gv):
w.deleteLater()
pc.gs = pc.gv = None
pc.embed_widget(w, delete_callback=delete_callback)
c.executeScript(
script=s,
namespace={'gs': pc.gs, 'gv': pc.gv})
#@+node:ekr.20110321005148.14534: *4* vr.update_html
update_html_count = 0
def update_html(self, s, keywords):
'''Update html in the vr pane.'''
pc = self
c = pc.c
if pc.must_change_widget(BaseTextWidget):
w = self.create_base_text_widget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
if isQt5:
w.hide() # This forces a proper update.
w.setHtml(s)
w.show()
c.bodyWantsFocusNow()
#@+node:ekr.20110320120020.14482: *4* vr.update_image
def update_image(self, s, keywords):
'''Update an image in the vr pane.'''
pc = self
if not s.strip():
return
lines = g.splitLines(s) or []
fn = lines and lines[0].strip()
if not fn:
return
w = pc.ensure_text_widget()
ok, path = pc.get_fn(fn, '@image')
if not ok:
w.setPlainText('@image: file not found: %s' % (path))
return
path = path.replace('\\', '/')
template = image_template % (path)
# Only works in Python 3.x.
template = g.adjustTripleString(template, pc.c.tab_width).strip()
# Sensitive to leading blank lines.
# template = g.toUnicode(template)
pc.show()
w.setReadOnly(False)
w.setHtml(template)
w.setReadOnly(True)
#@+node:ekr.20170105124347.1: *4* vr.update_jupyter & helper
update_jupyter_count = 0
def update_jupyter(self, s, keywords):
'''Update @jupyter node in the vr pane.'''
pc = self
c = pc.c
if pc.must_change_widget(BaseTextWidget):
w = self.create_base_text_widget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
s = self.get_jupyter_source(c)
if isQt5:
w.hide() # This forces a proper update.
w.setHtml(s)
w.show()
c.bodyWantsFocusNow()
#@+node:ekr.20180311090852.1: *5* vr.get_jupyter_source
def get_jupyter_source(self, c):
        '''Return the html for the @jupyter node.'''
body = c.p.b.lstrip()
if body.startswith('<'):
# Assume the body is html.
return body
if body.startswith('{'):
# Leo 5.7.1: Allow raw JSON.
s = body
else:
url = g.getUrlFromNode(c.p)
if not url:
return ''
if not nbformat:
                return 'cannot import nbformat to render url: %r' % url
try:
s = urlopen(url).read().decode()
except Exception:
return 'url not found: %s' % url
try:
nb = nbformat.reads(s, as_version=4)
e = HTMLExporter()
(s, junk_resources) = e.from_notebook_node(nb)
except nbformat.reader.NotJSONError:
pass # Assume the result is html.
return s
#@+node:ekr.20170324064811.1: *4* vr.update_latex & helper
def update_latex(self, s, keywords):
'''Update latex in the vr pane.'''
import sys
pc = self
c = pc.c
###
if sys.platform.startswith('win'):
g.es_print('latex rendering not ready for Python 3')
w = pc.ensure_text_widget()
pc.show()
w.setPlainText(s)
c.bodyWantsFocusNow()
return
if pc.must_change_widget(BaseTextWidget):
w = self.create_base_text_widget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
w.hide() # This forces a proper update.
s = self.create_latex_html(s)
w.setHtml(s)
w.show()
c.bodyWantsFocusNow()
#@+node:ekr.20170324085132.1: *5* vr.create_latex_html
def create_latex_html(self, s):
'''Create an html page embedding the latex code s.'''
c = self.c
# pylint: disable=deprecated-method
try:
import html
escape = html.escape
except AttributeError:
import cgi
escape = cgi.escape
html_s = escape(s)
template = latex_template % (html_s)
template = g.adjustTripleString(template, c.tab_width).strip()
return template
#@+node:peckj.20130207132858.3671: *4* vr.update_md & helper
def update_md(self, s, keywords):
'''Update markdown text in the vr pane.'''
pc = self; c = pc.c; p = c.p
s = s.strip().strip('"""').strip("'''").strip()
isHtml = s.startswith('<') and not s.startswith('<<')
# Do this regardless of whether we show the widget or not.
w = pc.ensure_text_widget()
assert pc.w
if s:
pc.show()
if got_markdown:
force = keywords.get('force')
colorizer = c.frame.body.colorizer
language = colorizer.scanLanguageDirectives(p)
if force or language in ('rst', 'rest', 'markdown', 'md'):
if not isHtml:
s = self.convert_to_markdown(s)
self.set_html(s,w)
else:
# g.trace('markdown not available: using rst')
self.update_rst(s,keywords)
#@+node:ekr.20160921134552.1: *5* convert_to_markdown
def convert_to_markdown(self, s):
'''Convert s to html using the markdown processor.'''
pc = self
c, p = pc.c, pc.c.p
path = g.scanAllAtPathDirectives(c, p) or c.getNodePath(p)
if not os.path.isdir(path):
path = os.path.dirname(path)
if os.path.isdir(path):
os.chdir(path)
try:
if pc.title:
s = pc.underline(pc.title) + s
pc.title = None
mdext = c.config.getString('view-rendered-md-extensions') or 'extra'
mdext = [x.strip() for x in mdext.split(',')]
s = markdown(s, extensions=mdext)
s = g.toUnicode(s)
except SystemMessage as sm:
msg = sm.args[0]
if 'SEVERE' in msg or 'FATAL' in msg:
s = 'MD error:\n%s\n\n%s' % (msg, s)
return s
#@+node:ekr.20110320120020.14481: *4* vr.update_movie
movie_warning = False
def update_movie(self, s, keywords):
'''Update a movie in the vr pane.'''
# pylint: disable=maybe-no-member
# 'PyQt4.phonon' has no 'VideoPlayer' member
# 'PyQt4.phonon' has no 'VideoCategory' member
# 'PyQt4.phonon' has no 'MediaSource' member
pc = self
ok, path = pc.get_fn(s, '@movie')
if not ok:
w = pc.ensure_text_widget()
w.setPlainText('Not found: %s' % (path))
return
if not phonon and not QtMultimedia:
if not self.movie_warning:
self.movie_warning = True
g.es_print('No phonon and no QtMultimedia modules')
w = pc.ensure_text_widget()
w.setPlainText('')
return
if pc.vp:
vp = pc.vp
pc.vp.stop()
pc.vp.deleteLater()
# Create a fresh player.
g.es_print('playing', path)
if QtMultimedia:
            url = QtCore.QUrl.fromLocalFile(path)
            content = QtMultimedia.QMediaContent(url)
pc.vp = vp = QtMultimedia.QMediaPlayer()
vp.setMedia(content)
# Won't play .mp4 files: https://bugreports.qt.io/browse/QTBUG-32783
vp.play()
else:
pc.vp = vp = phonon.VideoPlayer(phonon.VideoCategory)
vw = vp.videoWidget()
vw.setObjectName('video-renderer')
# Embed the widgets
def delete_callback():
if pc.vp:
pc.vp.stop()
pc.vp.deleteLater()
pc.vp = None
pc.embed_widget(vp, delete_callback=delete_callback)
pc.show()
vp = pc.vp
vp.load(phonon.MediaSource(path))
vp.play()
#@+node:ekr.20110320120020.14484: *4* vr.update_networkx
def update_networkx(self, s, keywords):
'''Update a networkx graphic in the vr pane.'''
pc = self
w = pc.ensure_text_widget()
w.setPlainText('') # 'Networkx: len: %s' % (len(s)))
pc.show()
#@+node:ekr.20160928023915.1: *4* vr.update_pyplot
def update_pyplot(self, s, keywords):
'''Get the pyplot script at c.p.b and show it.'''
c = self.c
if not self.pyplot_imported:
self.pyplot_imported = True
backend = g.os_path_finalize_join(
g.app.loadDir, '..', 'plugins', 'pyplot_backend.py')
if g.os_path_exists(backend):
try:
# The order of these statements is important...
import matplotlib
matplotlib.use('module://leo.plugins.pyplot_backend')
except ImportError:
g.trace('===== FAIL: pyplot.backend')
else:
g.trace('===== MISSING: pyplot.backend')
try:
import matplotlib # Make *sure* this is imported.
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.animation as animation
plt.ion() # Automatically set interactive mode.
namespace = {
'animation': animation,
'matplotlib': matplotlib,
'numpy': np, 'np': np,
'pyplot': plt, 'plt': plt,
}
except ImportError:
g.es_print('matplotlib imports failed')
namespace = {}
# Embedding already works without this!
# self.embed_pyplot_widget()
self.pyplot_active = True
# pyplot will throw RuntimeError if we close the pane.
c.executeScript(
event=None,
args=None, p=None,
script=None,
useSelectedText=False,
define_g=True,
define_name='__main__',
silent=False,
namespace=namespace,
raiseFlag=False,
runPyflakes=False, # Suppress warnings about pre-defined symbols.
)
c.bodyWantsFocusNow()
#@+node:ekr.20110320120020.14477: *4* vr.update_rst & helpers
def update_rst(self, s, keywords):
'''Update rst in the vr pane.'''
pc = self
s = s.strip().strip('"""').strip("'''").strip()
isHtml = s.startswith('<') and not s.startswith('<<')
# Do this regardless of whether we show the widget or not.
w = pc.ensure_text_widget()
assert pc.w
if s:
pc.show()
if got_docutils:
# Fix #420: viewrendered does not render some nodes
# Users (rightly) complained, so don't be clever here:
# c, p = pc.c, pc.c.p
# force = keywords.get('force')
# colorizer = c.frame.body.colorizer
# language = colorizer.scanLanguageDirectives(p)
# force or language in ('rst', 'rest', 'markdown', 'md'):
if not isHtml:
s = pc.convert_to_html(s)
pc.set_html(s, w)
else:
w.setPlainText(s)
#@+node:ekr.20160920221324.1: *5* vr.convert_to_html
def convert_to_html(self, s):
'''Convert s to html using docutils.'''
c, p = self.c, self.c.p
# Update the current path.
path = g.scanAllAtPathDirectives(c, p) or c.getNodePath(p)
if not os.path.isdir(path):
path = os.path.dirname(path)
if os.path.isdir(path):
os.chdir(path)
try:
if self.title:
s = self.underline(self.title) + s
self.title = None
# Call docutils to get the string.
s = publish_string(s, writer_name='html')
s = g.toUnicode(s)
except SystemMessage as sm:
msg = sm.args[0]
if 'SEVERE' in msg or 'FATAL' in msg:
s = 'RST error:\n%s\n\n%s' % (msg, s)
return s
#@+node:ekr.20110320120020.14479: *4* vr.update_svg
# http://doc.trolltech.com/4.4/qtsvg.html
# http://doc.trolltech.com/4.4/painting-svgviewer.html
def update_svg(self, s, keywords):
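        '''Update an @svg node in the vr pane.'''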
pc = self
if pc.must_change_widget(QtSvg.QSvgWidget):
w = QtSvg.QSvgWidget()
pc.embed_widget(w)
assert(w == pc.w)
else:
w = pc.w
if s.strip().startswith('<'):
# Assume it is the svg (xml) source.
s = g.adjustTripleString(s, pc.c.tab_width).strip()
# Sensitive to leading blank lines.
s = g.toEncodedString(s)
pc.show()
w.load(QtCore.QByteArray(s))
w.show()
else:
# Get a filename from the headline or body text.
ok, path = pc.get_fn(s, '@svg')
if ok:
pc.show()
w.load(path)
w.show()
#@+node:ekr.20110321005148.14537: *4* vr.update_url
def update_url(self, s, keywords):
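        '''Update an @url node in the vr pane, delegating to the rst or markdown renderer.'''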
pc = self
c, p = self.c, self.c.p
colorizer = c.frame.body.colorizer
language = colorizer.scanLanguageDirectives(p)
if language in ('rest', 'rst'):
pc.update_rst(s, keywords)
elif language in ('markdown', 'md'):
pc.update_md(s, keywords)
elif pc.default_kind in ('rest', 'rst'):
pc.update_rst(s, keywords)
elif pc.default_kind in ('markdown', 'md'):
pc.update_md(s, keywords)
else:
# Do nothing.
            g.trace('ignore', s)
w = pc.ensure_text_widget()
pc.show()
w.setPlainText('')
#@+node:ekr.20110322031455.5765: *4* vr.utils for update helpers...
#@+node:ekr.20110322031455.5764: *5* vr.ensure_text_widget
def ensure_text_widget(self):
'''Swap a text widget into the rendering pane if necessary.'''
c, pc = self.c, self
if pc.must_change_widget(QtWidgets.QTextBrowser):
# Instantiate a new QTextBrowser.
# Allow non-ctrl clicks to open url's.
w = QtWidgets.QTextBrowser()
def handleClick(url, w=w):
import leo.plugins.qt_text as qt_text
wrapper = qt_text.QTextEditWrapper(w, name='vr-body', c=c)
event = g.Bunch(c=c, w=wrapper)
g.openUrlOnClick(event, url=url)
# if self.w and hasattr(self.w, 'anchorClicked'):
# try:
# self.w.anchorClicked.disconnect()
# except Exception:
# g.es_exception()
w.anchorClicked.connect(handleClick)
w.setOpenLinks(False)
pc.embed_widget(w) # Creates w.wrapper
assert(w == pc.w)
return pc.w
#@+node:ekr.20110320120020.14483: *5* vr.get_kind
def get_kind(self, p):
'''Return the proper rendering kind for node p.'''
c, h, pc = self.c, p.h, self
if h.startswith('@'):
i = g.skip_id(h, 1, chars='-')
word = h[1: i].lower().strip()
if word in pc.dispatch_dict:
return word
# 2016/03/25: Honor @language
colorizer = c.frame.body.colorizer
language = colorizer.scanLanguageDirectives(p, use_default=False)
# Fix #344: don't use c.target_language as a default.
if got_markdown and language in ('md', 'markdown'):
return language
if got_docutils and language in ('rest', 'rst'):
return language
if language and language in pc.dispatch_dict:
return language
# To do: look at ancestors, or uA's.
return None
#@+node:ekr.20110320233639.5776: *5* vr.get_fn
def get_fn(self, s, tag):
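        '''Return (exists, path) for a file name taken from s or from the headline after tag.'''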
pc = self
c = pc.c
fn = s or c.p.h[len(tag):]
fn = fn.strip()
# Similar to code in g.computeFileUrl
if fn.startswith('~'):
# Expand '~' and handle Leo expressions.
fn = fn[1:]
fn = g.os_path_expanduser(fn)
fn = g.os_path_expandExpression(fn, c=c)
fn = g.os_path_finalize(fn)
else:
# Handle Leo expressions.
fn = g.os_path_expandExpression(fn, c=c)
# Handle ancestor @path directives.
if c and c.openDirectory:
base = c.getNodePath(c.p)
fn = g.os_path_finalize_join(c.openDirectory, base, fn)
else:
fn = g.os_path_finalize(fn)
ok = g.os_path_exists(fn)
# if not ok: g.trace('not found', fn)
return ok, fn
#@+node:ekr.20110321005148.14536: *5* vr.get_url
def get_url(self, s, tag):
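        '''Return the url taken from s or from the headline after tag.'''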
p = self.c.p
url = s or p.h[len(tag):]
url = url.strip()
return url
#@+node:ekr.20110322031455.5763: *5* vr.must_change_widget
def must_change_widget(self, widget_class):
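        '''Return True if the widget in the vr pane must be replaced by an instance of widget_class.'''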
pc = self
return not pc.w or pc.w.__class__ != widget_class
#@+node:ekr.20110320120020.14485: *5* vr.remove_directives
def remove_directives(self, s):
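        '''Remove all lines containing Leo directives from s.'''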
lines = g.splitLines(s)
result = []
for s in lines:
if s.startswith('@'):
i = g.skip_id(s, 1)
word = s[1: i]
if word in g.globalDirectiveList:
continue
result.append(s)
return ''.join(result)
#@+node:vitalije.20170712183051.1: *3* vr.adjust_layout (legacy only)
def adjust_layout(self, which):
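        '''Load the saved splitter layout for the 'open' or 'closed' state of the vr pane.'''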
global layouts
c = self.c
splitter = self.splitter
deflo = c.db.get('viewrendered_default_layouts', (None, None))
loc, loo = layouts.get(c.hash(), deflo)
if which == 'closed' and loc and splitter:
splitter.load_layout(loc)
elif which == 'open' and loo and splitter:
splitter.load_layout(loo)
#@+node:ekr.20190614133401.1: *3* vr.show_dock_or_pane
def show_dock_or_pane(self):
c, vr = self.c, self
if g.app.dock:
dock = vr.leo_dock
if dock:
if dock.isHidden():
dock.show()
dock.raise_()
# #1230.
else:
vr.activate()
vr.show()
vr.adjust_layout('open')
c.bodyWantsFocusNow()
#@+node:vitalije.20170712183618.1: *3* vr.store_layout
def store_layout(self, which):
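        '''Save the current splitter layout for the 'open' or 'closed' state of the vr pane.'''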
global layouts
c = self.c; h = c.hash()
splitter = self.splitter
deflo = c.db.get('viewrendered_default_layouts', (None, None))
(loc, loo) = layouts.get(c.hash(), deflo)
if which == 'closed' and splitter:
loc = splitter.get_saveable_layout()
loc = json.loads(json.dumps(loc))
layouts[h] = loc, loo
elif which == 'open' and splitter:
loo = splitter.get_saveable_layout()
loo = json.loads(json.dumps(loo))
layouts[h] = loc, loo
c.db['viewrendered_default_layouts'] = layouts[h]
#@-others
#@-others
#@@language python
#@@tabwidth -4
#@-leo
the-stack_0_18705 | import asyncio
import logging
import os
import tempfile
import textwrap
import uuid
from functools import partial
from multiprocessing import Process
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Union, Set
import numpy as np
from aiohttp import ClientError
from colorclass import Color
from rasa.nlu.training_data.loading import MARKDOWN, RASA
from sanic import Sanic, response
from sanic.exceptions import NotFound
from terminaltables import AsciiTable, SingleTable
import questionary
import rasa.cli.utils
from questionary import Choice, Form, Question
from rasa.cli import utils as cliutils
from rasa.core import constants, run, train, utils
from rasa.core.actions.action import ACTION_LISTEN_NAME, default_action_names
from rasa.core.channels.channel import UserMessage
from rasa.core.constants import (
DEFAULT_SERVER_FORMAT,
DEFAULT_SERVER_PORT,
DEFAULT_SERVER_URL,
REQUESTED_SLOT,
UTTER_PREFIX,
)
from rasa.core.domain import Domain
import rasa.core.events
from rasa.core.events import (
ActionExecuted,
ActionReverted,
BotUttered,
Event,
Restarted,
UserUttered,
UserUtteranceReverted,
)
from rasa.core.interpreter import INTENT_MESSAGE_PREFIX, NaturalLanguageInterpreter
from rasa.core.trackers import EventVerbosity, DialogueStateTracker
from rasa.core.training import visualization
from rasa.core.training.visualization import (
VISUALIZATION_TEMPLATE_PATH,
visualize_neighborhood,
)
from rasa.core.utils import AvailableEndpoints
from rasa.utils.common import update_sanic_log_level
from rasa.utils.endpoints import EndpointConfig
# noinspection PyProtectedMember
from rasa.nlu.training_data import loading
from rasa.nlu.training_data.message import Message
# WARNING: This command line UI is using an external library
# communicating with the shell - these functions are hard to test
# automatically. If you change anything in here, please make sure to
# run the interactive learning and check if your part of the "ui"
# still works.
import rasa.utils.io as io_utils
logger = logging.getLogger(__name__)
MAX_VISUAL_HISTORY = 3
PATHS = {
"stories": "data/stories.md",
"nlu": "data/nlu.md",
"backup": "data/nlu_interactive.md",
"domain": "domain.yml",
}
SAVE_IN_E2E = False
# choose other intent, making sure this doesn't clash with an existing intent
OTHER_INTENT = uuid.uuid4().hex
OTHER_ACTION = uuid.uuid4().hex
NEW_ACTION = uuid.uuid4().hex
NEW_TEMPLATES = {}
MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION = 200
DEFAULT_STORY_GRAPH_FILE = "story_graph.dot"
class RestartConversation(Exception):
    """Exception used to break out of the flow and restart the conversation."""
pass
class ForkTracker(Exception):
    """Exception used to break out of the flow and fork at a previous step.
    The tracker will be reset to the selected point in the past and the
    conversation will continue from there."""
pass
class UndoLastStep(Exception):
    """Exception used to break out of the flow and undo the last step.
    The last step is either the most recent user message or the most
    recent action run by the bot."""
pass
class Abort(Exception):
"""Exception used to abort the interactive learning and exit."""
pass
async def send_message(
endpoint: EndpointConfig,
sender_id: Text,
message: Text,
parse_data: Optional[Dict[Text, Any]] = None,
) -> Dict[Text, Any]:
"""Send a user message to a conversation."""
payload = {
"sender": UserUttered.type_name,
"text": message,
"parse_data": parse_data,
}
return await endpoint.request(
json=payload, method="post", subpath=f"/conversations/{sender_id}/messages"
)
async def request_prediction(
endpoint: EndpointConfig, sender_id: Text
) -> Dict[Text, Any]:
"""Request the next action prediction from core."""
return await endpoint.request(
method="post", subpath=f"/conversations/{sender_id}/predict"
)
async def retrieve_domain(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the domain from core."""
return await endpoint.request(
method="get", subpath="/domain", headers={"Accept": "application/json"}
)
async def retrieve_status(endpoint: EndpointConfig) -> Dict[Text, Any]:
"""Retrieve the status from core."""
return await endpoint.request(method="get", subpath="/status")
async def retrieve_tracker(
endpoint: EndpointConfig,
sender_id: Text,
verbosity: EventVerbosity = EventVerbosity.ALL,
) -> Dict[Text, Any]:
"""Retrieve a tracker from core."""
path = "/conversations/{}/tracker?include_events={}".format(
sender_id, verbosity.name
)
return await endpoint.request(
method="get", subpath=path, headers={"Accept": "application/json"}
)
async def send_action(
endpoint: EndpointConfig,
sender_id: Text,
action_name: Text,
policy: Optional[Text] = None,
confidence: Optional[float] = None,
is_new_action: bool = False,
) -> Dict[Text, Any]:
"""Log an action to a conversation."""
payload = ActionExecuted(action_name, policy, confidence).as_dict()
subpath = f"/conversations/{sender_id}/execute"
try:
return await endpoint.request(json=payload, method="post", subpath=subpath)
except ClientError:
if is_new_action:
if action_name in NEW_TEMPLATES:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{}', "
"with matching template: '{}'. "
"This action will not return its message in this session, "
"but the new utterance will be saved to your domain file "
"when you exit and save this session. "
"You do not need to do anything further. "
"".format(action_name, [*NEW_TEMPLATES[action_name]][0])
)
await _ask_questions(warning_questions, sender_id, endpoint)
else:
warning_questions = questionary.confirm(
"WARNING: You have created a new action: '{}', "
"which was not successfully executed. "
"If this action does not return any events, "
"you do not need to do anything. "
"If this is a custom action which returns events, "
"you are recommended to implement this action "
"in your action server and try again."
"".format(action_name)
)
await _ask_questions(warning_questions, sender_id, endpoint)
payload = ActionExecuted(action_name).as_dict()
return await send_event(endpoint, sender_id, payload)
else:
logger.error("failed to execute action!")
raise
async def send_event(
endpoint: EndpointConfig,
sender_id: Text,
evt: Union[List[Dict[Text, Any]], Dict[Text, Any]],
) -> Dict[Text, Any]:
"""Log an event to a conversation."""
subpath = f"/conversations/{sender_id}/tracker/events"
return await endpoint.request(json=evt, method="post", subpath=subpath)
def format_bot_output(message: BotUttered) -> Text:
"""Format a bot response to be displayed in the history table."""
# First, add text to output
output = message.text or ""
# Then, append all additional items
data = message.data or {}
if not data:
return output
if data.get("image"):
output += "\nImage: " + data.get("image")
if data.get("attachment"):
output += "\nAttachment: " + data.get("attachment")
if data.get("buttons"):
output += "\nButtons:"
choices = cliutils.button_choices_from_message_data(
data, allow_free_text_input=True
)
for choice in choices:
output += "\n" + choice
if data.get("elements"):
output += "\nElements:"
for idx, element in enumerate(data.get("elements")):
element_str = cliutils.element_to_string(element, idx)
output += "\n" + element_str
if data.get("quick_replies"):
output += "\nQuick replies:"
for idx, element in enumerate(data.get("quick_replies")):
element_str = cliutils.element_to_string(element, idx)
output += "\n" + element_str
return output
def latest_user_message(events: List[Dict[Text, Any]]) -> Optional[Dict[Text, Any]]:
"""Return most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return e
return None
def all_events_before_latest_user_msg(
events: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
"""Return all events that happened before the most recent user message."""
for i, e in enumerate(reversed(events)):
if e.get("event") == UserUttered.type_name:
return events[: -(i + 1)]
return events
async def _ask_questions(
questions: Union[Form, Question],
sender_id: Text,
endpoint: EndpointConfig,
is_abort: Callable[[Dict[Text, Any]], bool] = lambda x: False,
) -> Any:
"""Ask the user a question, if Ctrl-C is pressed provide user with menu."""
should_retry = True
answers = {}
while should_retry:
answers = questions.ask()
if answers is None or is_abort(answers):
should_retry = await _ask_if_quit(sender_id, endpoint)
else:
should_retry = False
return answers
def _selection_choices_from_intent_prediction(
predictions: List[Dict[Text, Any]]
) -> List[Dict[Text, Any]]:
    """Given a list of ML predictions, create a UI choice list."""
sorted_intents = sorted(predictions, key=lambda k: (-k["confidence"], k["name"]))
choices = []
for p in sorted_intents:
name_with_confidence = "{:03.2f} {:40}".format(
p.get("confidence"), p.get("name")
)
choice = {"name": name_with_confidence, "value": p.get("name")}
choices.append(choice)
return choices
async def _request_free_text_intent(sender_id: Text, endpoint: EndpointConfig) -> Text:
question = questionary.text(
message="Please type the intent name:",
validate=io_utils.not_empty_validator("Please enter an intent name"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_action(sender_id: Text, endpoint: EndpointConfig) -> Text:
question = questionary.text(
message="Please type the action name:",
validate=io_utils.not_empty_validator("Please enter an action name"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_free_text_utterance(
sender_id: Text, endpoint: EndpointConfig, action: Text
) -> Text:
question = questionary.text(
message=(
"Please type the message for your new utterance "
"template '{}':".format(action)
),
validate=io_utils.not_empty_validator("Please enter a template message"),
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_selection_from_intents(
intents: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select("What intent is it?", choices=intents)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_point_from_list(
forks: List[Dict[Text, Text]], sender_id: Text, endpoint: EndpointConfig
) -> Text:
question = questionary.select(
"Before which user message do you want to fork?", choices=forks
)
return await _ask_questions(question, sender_id, endpoint)
async def _request_fork_from_user(
sender_id, endpoint
) -> Optional[List[Dict[Text, Any]]]:
"""Take in a conversation and ask at which point to fork the conversation.
    Returns the list of events that should be kept. Forking means the
conversation will be reset and continued from this previous point."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
choices = []
for i, e in enumerate(tracker.get("events", [])):
if e.get("event") == UserUttered.type_name:
choices.append({"name": e.get("text"), "value": i})
fork_idx = await _request_fork_point_from_list(
list(reversed(choices)), sender_id, endpoint
)
if fork_idx is not None:
return tracker.get("events", [])[: int(fork_idx)]
else:
return None
async def _request_intent_from_user(
latest_message, intents, sender_id, endpoint
) -> Dict[Text, Any]:
"""Take in latest message and ask which intent it should have been.
Returns the intent dict that has been selected by the user."""
predictions = latest_message.get("parse_data", {}).get("intent_ranking", [])
predicted_intents = {p["name"] for p in predictions}
for i in intents:
if i not in predicted_intents:
predictions.append({"name": i, "confidence": 0.0})
# convert intents to ui list and add <other> as a free text alternative
choices = [
{"name": "<create_new_intent>", "value": OTHER_INTENT}
] + _selection_choices_from_intent_prediction(predictions)
intent_name = await _request_selection_from_intents(choices, sender_id, endpoint)
if intent_name == OTHER_INTENT:
intent_name = await _request_free_text_intent(sender_id, endpoint)
selected_intent = {"name": intent_name, "confidence": 1.0}
else:
# returns the selected intent with the original probability value
selected_intent = next(
(x for x in predictions if x["name"] == intent_name), {"name": None}
)
return selected_intent
async def _print_history(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Print information about the conversation for the user."""
tracker_dump = await retrieve_tracker(
endpoint, sender_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
table = _chat_history_table(events)
slot_strs = _slot_history(tracker_dump)
print("------")
print("Chat History\n")
print(table)
if slot_strs:
print("\n")
print("Current slots: \n\t{}\n".format(", ".join(slot_strs)))
print("------")
def _chat_history_table(events: List[Dict[Text, Any]]) -> Text:
"""Create a table containing bot and user messages.
Also includes additional information, like any events and
prediction probabilities."""
def wrap(txt: Text, max_width: int) -> Text:
return "\n".join(textwrap.wrap(txt, max_width, replace_whitespace=False))
def colored(txt: Text, color: Text) -> Text:
return "{" + color + "}" + txt + "{/" + color + "}"
def format_user_msg(user_event: UserUttered, max_width: int) -> Text:
intent = user_event.intent or {}
intent_name = intent.get("name", "")
_confidence = intent.get("confidence", 1.0)
_md = _as_md_message(user_event.parse_data)
_lines = [
colored(wrap(_md, max_width), "hired"),
f"intent: {intent_name} {_confidence:03.2f}",
]
return "\n".join(_lines)
def bot_width(_table: AsciiTable) -> int:
return _table.column_max_width(1)
def user_width(_table: AsciiTable) -> int:
return _table.column_max_width(3)
def add_bot_cell(data, cell):
data.append([len(data), Color(cell), "", ""])
def add_user_cell(data, cell):
data.append([len(data), "", "", Color(cell)])
# prints the historical interactions between the bot and the user,
# to help with correctly identifying the action
table_data = [
[
"# ",
Color(colored("Bot ", "autoblue")),
" ",
Color(colored("You ", "hired")),
]
]
table = SingleTable(table_data, "Chat History")
bot_column = []
tracker = DialogueStateTracker.from_dict("any", events)
applied_events = tracker.applied_events()
for idx, event in enumerate(applied_events):
if isinstance(event, ActionExecuted):
bot_column.append(colored(event.action_name, "autocyan"))
if event.confidence is not None:
bot_column[-1] += colored(f" {event.confidence:03.2f}", "autowhite")
elif isinstance(event, UserUttered):
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
bot_column = []
msg = format_user_msg(event, user_width(table))
add_user_cell(table_data, msg)
elif isinstance(event, BotUttered):
wrapped = wrap(format_bot_output(event), bot_width(table))
bot_column.append(colored(wrapped, "autoblue"))
else:
if event.as_story_string():
bot_column.append(wrap(event.as_story_string(), bot_width(table)))
if bot_column:
text = "\n".join(bot_column)
add_bot_cell(table_data, text)
table.inner_heading_row_border = False
table.inner_row_border = True
table.inner_column_border = False
table.outer_border = False
table.justify_columns = {0: "left", 1: "left", 2: "center", 3: "right"}
return table.table
def _slot_history(tracker_dump: Dict[Text, Any]) -> List[Text]:
"""Create an array of slot representations to be displayed."""
slot_strs = []
for k, s in tracker_dump.get("slots", {}).items():
colored_value = cliutils.wrap_with_color(
str(s), color=rasa.cli.utils.bcolors.WARNING
)
slot_strs.append(f"{k}: {colored_value}")
return slot_strs
async def _write_data_to_file(sender_id: Text, endpoint: EndpointConfig):
"""Write stories and nlu data to file."""
story_path, nlu_path, domain_path = _request_export_info()
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
serialised_domain = await retrieve_domain(endpoint)
domain = Domain.from_dict(serialised_domain)
await _write_stories_to_file(story_path, events, domain)
await _write_nlu_to_file(nlu_path, events)
await _write_domain_to_file(domain_path, events, domain)
logger.info("Successfully wrote stories and NLU data")
async def _ask_if_quit(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Display the exit menu.
Return `True` if the previous question should be retried."""
answer = questionary.select(
message="Do you want to stop?",
choices=[
Choice("Continue", "continue"),
Choice("Undo Last", "undo"),
Choice("Fork", "fork"),
Choice("Start Fresh", "restart"),
Choice("Export & Quit", "quit"),
],
).ask()
if not answer or answer == "quit":
# this is also the default answer if the user presses Ctrl-C
await _write_data_to_file(sender_id, endpoint)
raise Abort()
elif answer == "continue":
# in this case we will just return, and the original
# question will get asked again
return True
elif answer == "undo":
raise UndoLastStep()
elif answer == "fork":
raise ForkTracker()
elif answer == "restart":
raise RestartConversation()
async def _request_action_from_user(
predictions: List[Dict[Text, Any]], sender_id: Text, endpoint: EndpointConfig
) -> Tuple[Text, bool]:
"""Ask the user to correct an action prediction."""
await _print_history(sender_id, endpoint)
choices = [
{
"name": "{:03.2f} {:40}".format(a.get("score"), a.get("action")),
"value": a.get("action"),
}
for a in predictions
]
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
session_actions_all = [a["name"] for a in _collect_actions(events)]
session_actions_unique = list(set(session_actions_all))
old_actions = [action["value"] for action in choices]
new_actions = [
{"name": action, "value": OTHER_ACTION + action}
for action in session_actions_unique
if action not in old_actions
]
choices = (
[{"name": "<create new action>", "value": NEW_ACTION}] + new_actions + choices
)
question = questionary.select("What is the next action of the bot?", choices)
action_name = await _ask_questions(question, sender_id, endpoint)
is_new_action = action_name == NEW_ACTION
if is_new_action:
# create new action
action_name = await _request_free_text_action(sender_id, endpoint)
if action_name.startswith(UTTER_PREFIX):
utter_message = await _request_free_text_utterance(
sender_id, endpoint, action_name
)
NEW_TEMPLATES[action_name] = {utter_message: ""}
elif action_name[:32] == OTHER_ACTION:
# action was newly created in the session, but not this turn
is_new_action = True
action_name = action_name[32:]
print(f"Thanks! The bot will now run {action_name}.\n")
return action_name, is_new_action
def _request_export_info() -> Tuple[Text, Text, Text]:
"""Request file path and export stories & nlu data to that path"""
# export training data and quit
questions = questionary.form(
export_stories=questionary.text(
message="Export stories to (if file exists, this "
"will append the stories)",
default=PATHS["stories"],
validate=io_utils.file_type_validator(
[".md"],
"Please provide a valid export path for the stories, e.g. 'stories.md'.",
),
),
export_nlu=questionary.text(
message="Export NLU data to (if file exists, this will "
"merge learned data with previous training examples)",
default=PATHS["nlu"],
validate=io_utils.file_type_validator(
[".md", ".json"],
"Please provide a valid export path for the NLU data, e.g. 'nlu.md'.",
),
),
export_domain=questionary.text(
message="Export domain file to (if file exists, this "
"will be overwritten)",
default=PATHS["domain"],
validate=io_utils.file_type_validator(
[".yml", ".yaml"],
"Please provide a valid export path for the domain file, e.g. 'domain.yml'.",
),
),
)
answers = questions.ask()
if not answers:
raise Abort()
return (answers["export_stories"], answers["export_nlu"], answers["export_domain"])
def _split_conversation_at_restarts(
events: List[Dict[Text, Any]]
) -> List[List[Dict[Text, Any]]]:
"""Split a conversation at restart events.
Returns an array of event lists, without the restart events."""
sub_conversations = []
current = []
for e in events:
if e.get("event") == "restart":
if current:
sub_conversations.append(current)
current = []
else:
current.append(e)
if current:
sub_conversations.append(current)
return sub_conversations
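# Illustrative sketch (hypothetical event dicts): for
#   [{"event": "user"}, {"event": "restart"}, {"event": "action"}]
# _split_conversation_at_restarts returns [[{"event": "user"}], [{"event": "action"}]]:
# the restart event itself is dropped and the events around it become
# separate sub-conversations.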
def _collect_messages(events: List[Dict[Text, Any]]) -> List[Message]:
"""Collect the message text and parsed data from the UserMessage events
into a list"""
from rasa.nlu.extractors.duckling_http_extractor import DucklingHTTPExtractor
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.extractors.spacy_entity_extractor import SpacyEntityExtractor
msgs = []
for event in events:
if event.get("event") == UserUttered.type_name:
data = event.get("parse_data", {})
for entity in data.get("entities", []):
excluded_extractors = [
DucklingHTTPExtractor.__name__,
SpacyEntityExtractor.__name__,
MitieEntityExtractor.__name__,
]
logger.debug(
"Exclude entity marking of following extractors"
" {} when writing nlu data "
"to file.".format(excluded_extractors)
)
if entity.get("extractor") in excluded_extractors:
data["entities"].remove(entity)
msg = Message.build(data["text"], data["intent"]["name"], data["entities"])
msgs.append(msg)
elif event.get("event") == UserUtteranceReverted.type_name and msgs:
msgs.pop() # user corrected the nlu, remove incorrect example
return msgs
def _collect_actions(events: List[Dict[Text, Any]]) -> List[Dict[Text, Any]]:
"""Collect all the `ActionExecuted` events into a list."""
return [evt for evt in events if evt.get("event") == ActionExecuted.type_name]
async def _write_stories_to_file(
export_story_path: Text, events: List[Dict[Text, Any]], domain: Domain
) -> None:
"""Write the conversation of the sender_id to the file paths."""
sub_conversations = _split_conversation_at_restarts(events)
io_utils.create_path(export_story_path)
if os.path.exists(export_story_path):
append_write = "a" # append if already exists
else:
append_write = "w" # make a new file if not
with open(export_story_path, append_write, encoding=io_utils.DEFAULT_ENCODING) as f:
i = 1
for conversation in sub_conversations:
parsed_events = rasa.core.events.deserialise_events(conversation)
tracker = DialogueStateTracker.from_events(
f"interactive_story_{i}", evts=parsed_events, slots=domain.slots
)
if any(
isinstance(event, UserUttered) for event in tracker.applied_events()
):
i += 1
f.write("\n" + tracker.export_stories(SAVE_IN_E2E))
def _filter_messages(msgs: List[Message]) -> List[Message]:
"""Filter messages removing those that start with INTENT_MESSAGE_PREFIX"""
filtered_messages = []
for msg in msgs:
if not msg.text.startswith(INTENT_MESSAGE_PREFIX):
filtered_messages.append(msg)
return filtered_messages
async def _write_nlu_to_file(
export_nlu_path: Text, events: List[Dict[Text, Any]]
) -> None:
"""Write the nlu data of the sender_id to the file paths."""
from rasa.nlu.training_data import TrainingData
msgs = _collect_messages(events)
msgs = _filter_messages(msgs)
# noinspection PyBroadException
try:
previous_examples = loading.load_data(export_nlu_path)
except Exception as e:
logger.debug(
"An exception occurred while trying to load the NLU data. {}".format(str(e))
)
# No previous file exists, use empty training data as replacement.
previous_examples = TrainingData()
nlu_data = previous_examples.merge(TrainingData(msgs))
    # Guess the target format from the existing file before it is opened for
    # writing, so the format detection does not read a file we are about to overwrite.
nlu_format = _get_nlu_target_format(export_nlu_path)
if nlu_format == MARKDOWN:
stringified_training_data = nlu_data.nlu_as_markdown()
else:
stringified_training_data = nlu_data.nlu_as_json()
io_utils.write_text_file(stringified_training_data, export_nlu_path)
def _get_nlu_target_format(export_path: Text) -> Text:
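    """Infer whether the NLU export file should be written as Markdown or Rasa JSON."""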
guessed_format = loading.guess_format(export_path)
if guessed_format not in {MARKDOWN, RASA}:
if export_path.endswith(".json"):
guessed_format = RASA
else:
guessed_format = MARKDOWN
return guessed_format
def _entities_from_messages(messages: List[Message]) -> List[Text]:
"""Return all entities that occur in at least one of the messages."""
return list({e["entity"] for m in messages for e in m.data.get("entities", [])})
def _intents_from_messages(messages: List[Message]) -> Set[Text]:
"""Return all intents that occur in at least one of the messages."""
# set of distinct intents
distinct_intents = {m.data["intent"] for m in messages if "intent" in m.data}
return distinct_intents
async def _write_domain_to_file(
domain_path: Text, events: List[Dict[Text, Any]], old_domain: Domain
) -> None:
"""Write an updated domain file to the file path."""
io_utils.create_path(domain_path)
messages = _collect_messages(events)
actions = _collect_actions(events)
templates = NEW_TEMPLATES
# TODO for now there is no way to distinguish between action and form
collected_actions = list(
{e["name"] for e in actions if e["name"] not in default_action_names()}
)
new_domain = Domain(
intents=_intents_from_messages(messages),
entities=_entities_from_messages(messages),
slots=[],
templates=templates,
action_names=collected_actions,
form_names=[],
)
old_domain.merge(new_domain).persist_clean(domain_path)
async def _predict_till_next_listen(
endpoint: EndpointConfig,
sender_id: Text,
sender_ids: List[Text],
plot_file: Optional[Text],
) -> None:
"""Predict and validate actions until we need to wait for a user message."""
listen = False
while not listen:
result = await request_prediction(endpoint, sender_id)
predictions = result.get("scores")
probabilities = [prediction["score"] for prediction in predictions]
pred_out = int(np.argmax(probabilities))
action_name = predictions[pred_out].get("action")
policy = result.get("policy")
confidence = result.get("confidence")
await _print_history(sender_id, endpoint)
await _plot_trackers(
sender_ids, plot_file, endpoint, unconfirmed=[ActionExecuted(action_name)]
)
listen = await _validate_action(
action_name, policy, confidence, predictions, endpoint, sender_id
)
await _plot_trackers(sender_ids, plot_file, endpoint)
tracker_dump = await retrieve_tracker(
endpoint, sender_id, EventVerbosity.AFTER_RESTART
)
events = tracker_dump.get("events", [])
if len(events) >= 2:
last_event = events[-2] # last event before action_listen
# if bot message includes buttons the user will get a list choice to reply
# the list choice is displayed in place of action listen
if last_event.get("event") == BotUttered.type_name and last_event["data"].get(
"buttons", None
):
response = _get_button_choice(last_event)
if response != cliutils.FREE_TEXT_INPUT_PROMPT:
await send_message(endpoint, sender_id, response)
def _get_button_choice(last_event: Dict[Text, Any]) -> Text:
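    """Ask the user to pick one of the buttons (or free text) offered by the last bot message."""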
data = last_event["data"]
message = last_event.get("text", "")
choices = cliutils.button_choices_from_message_data(
data, allow_free_text_input=True
)
question = questionary.select(message, choices)
response = cliutils.payload_from_button_question(question)
return response
async def _correct_wrong_nlu(
corrected_nlu: Dict[Text, Any],
events: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
) -> None:
"""A wrong NLU prediction got corrected, update core's tracker."""
revert_latest_user_utterance = UserUtteranceReverted().as_dict()
# `UserUtteranceReverted` also removes the `ACTION_LISTEN` event before, hence we
# have to replay it.
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
corrected_message = latest_user_message(events)
if corrected_message is None:
raise Exception("Failed to correct NLU data. User message not found.")
corrected_message["parse_data"] = corrected_nlu
await send_event(
endpoint,
sender_id,
[revert_latest_user_utterance, listen_for_next_message, corrected_message],
)
async def _correct_wrong_action(
corrected_action: Text,
endpoint: EndpointConfig,
sender_id: Text,
is_new_action: bool = False,
) -> None:
"""A wrong action prediction got corrected, update core's tracker."""
await send_action(
endpoint, sender_id, corrected_action, is_new_action=is_new_action
)
def _form_is_rejected(action_name: Text, tracker: Dict[Text, Any]) -> bool:
"""Check if the form got rejected with the most recent action name."""
return (
tracker.get("active_form", {}).get("name")
and action_name != tracker["active_form"]["name"]
and action_name != ACTION_LISTEN_NAME
)
def _form_is_restored(action_name: Text, tracker: Dict[Text, Any]) -> bool:
"""Check whether the form is called again after it was rejected."""
return (
tracker.get("active_form", {}).get("rejected")
and tracker.get("latest_action_name") == ACTION_LISTEN_NAME
and action_name == tracker.get("active_form", {}).get("name")
)
async def _confirm_form_validation(action_name, tracker, endpoint, sender_id) -> None:
"""Ask a user whether an input for a form should be validated.
Previous to this call, the active form was chosen after it was rejected."""
requested_slot = tracker.get("slots", {}).get(REQUESTED_SLOT)
validation_questions = questionary.confirm(
"Should '{}' validate user input to fill "
"the slot '{}'?".format(action_name, requested_slot)
)
validate_input = await _ask_questions(validation_questions, sender_id, endpoint)
if not validate_input:
# notify form action to skip validation
await send_event(
endpoint, sender_id, {"event": "form_validation", "validate": False}
)
elif not tracker.get("active_form", {}).get("validate"):
# handle contradiction with learned behaviour
warning_question = questionary.confirm(
"ERROR: FormPolicy predicted no form validation "
"based on previous training stories. "
"Make sure to remove contradictory stories "
"from training data. "
"Otherwise predicting no form validation "
"will not work as expected."
)
await _ask_questions(warning_question, sender_id, endpoint)
# notify form action to validate an input
await send_event(
endpoint, sender_id, {"event": "form_validation", "validate": True}
)
async def _validate_action(
action_name: Text,
policy: Text,
confidence: float,
predictions: List[Dict[Text, Any]],
endpoint: EndpointConfig,
sender_id: Text,
) -> bool:
"""Query the user to validate if an action prediction is correct.
Returns `True` if the prediction is correct, `False` otherwise."""
question = questionary.confirm(f"The bot wants to run '{action_name}', correct?")
is_correct = await _ask_questions(question, sender_id, endpoint)
if not is_correct:
action_name, is_new_action = await _request_action_from_user(
predictions, sender_id, endpoint
)
else:
is_new_action = False
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
if _form_is_rejected(action_name, tracker):
# notify the tracker that form was rejected
await send_event(
endpoint,
sender_id,
{
"event": "action_execution_rejected",
"name": tracker["active_form"]["name"],
},
)
elif _form_is_restored(action_name, tracker):
await _confirm_form_validation(action_name, tracker, endpoint, sender_id)
if not is_correct:
await _correct_wrong_action(
action_name, endpoint, sender_id, is_new_action=is_new_action
)
else:
await send_action(endpoint, sender_id, action_name, policy, confidence)
return action_name == ACTION_LISTEN_NAME
def _as_md_message(parse_data: Dict[Text, Any]) -> Text:
"""Display the parse data of a message in markdown format."""
from rasa.nlu.training_data.formats import MarkdownWriter
if parse_data.get("text", "").startswith(INTENT_MESSAGE_PREFIX):
return parse_data["text"]
if not parse_data.get("entities"):
parse_data["entities"] = []
return MarkdownWriter.generate_message_md(parse_data)
def _validate_user_regex(latest_message: Dict[Text, Any], intents: List[Text]) -> bool:
    """Validate whether a user's message input is correct.
This assumes the user entered an intent directly, e.g. using
`/greet`. Return `True` if the intent is a known one."""
parse_data = latest_message.get("parse_data", {})
intent = parse_data.get("intent", {}).get("name")
if intent in intents:
return True
else:
return False
async def _validate_user_text(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> bool:
"""Validate a user message input as free text.
This assumes the user message is a text message (so NOT `/greet`)."""
parse_data = latest_message.get("parse_data", {})
text = _as_md_message(parse_data)
intent = parse_data.get("intent", {}).get("name")
entities = parse_data.get("entities", [])
if entities:
message = (
"Is the intent '{}' correct for '{}' and are "
"all entities labeled correctly?".format(intent, text)
)
else:
message = (
"Your NLU model classified '{}' with intent '{}'"
" and there are no entities, is this correct?".format(text, intent)
)
if intent is None:
print(f"The NLU classification for '{text}' returned '{intent}'")
return False
else:
question = questionary.confirm(message)
return await _ask_questions(question, sender_id, endpoint)
async def _validate_nlu(
intents: List[Text], endpoint: EndpointConfig, sender_id: Text
) -> None:
    """Validate whether a user message, either text or intent, is correct.
If the prediction of the latest user message is incorrect,
the tracker will be corrected with the correct intent / entities."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.AFTER_RESTART)
latest_message = latest_user_message(tracker.get("events", [])) or {}
if latest_message.get("text", "").startswith( # pytype: disable=attribute-error
INTENT_MESSAGE_PREFIX
):
valid = _validate_user_regex(latest_message, intents)
else:
valid = await _validate_user_text(latest_message, endpoint, sender_id)
if not valid:
corrected_intent = await _request_intent_from_user(
latest_message, intents, sender_id, endpoint
)
# corrected intents have confidence 1.0
corrected_intent["confidence"] = 1.0
events = tracker.get("events", [])
entities = await _correct_entities(latest_message, endpoint, sender_id)
corrected_nlu = {
"intent": corrected_intent,
"entities": entities,
"text": latest_message.get("text"),
}
await _correct_wrong_nlu(corrected_nlu, events, endpoint, sender_id)
async def _correct_entities(
latest_message: Dict[Text, Any], endpoint: EndpointConfig, sender_id: Text
) -> List[Dict[Text, Any]]:
"""Validate the entities of a user message.
Returns the corrected entities"""
from rasa.nlu.training_data.formats import MarkdownReader
parse_original = latest_message.get("parse_data", {})
entity_str = _as_md_message(parse_original)
question = questionary.text(
"Please mark the entities using [value](type) notation", default=entity_str
)
annotation = await _ask_questions(question, sender_id, endpoint)
# noinspection PyProtectedMember
parse_annotated = MarkdownReader().parse_training_example(annotation)
corrected_entities = _merge_annotated_and_original_entities(
parse_annotated, parse_original
)
return corrected_entities
def _merge_annotated_and_original_entities(
parse_annotated: Message, parse_original: Dict[Text, Any]
) -> List[Dict[Text, Any]]:
# overwrite entities which have already been
# annotated in the original annotation to preserve
# additional entity parser information
entities = parse_annotated.get("entities", [])[:]
for i, entity in enumerate(entities):
for original_entity in parse_original.get("entities", []):
if _is_same_entity_annotation(entity, original_entity):
entities[i] = original_entity
break
return entities
def _is_same_entity_annotation(entity, other) -> Any:
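    """Return True if two entity annotations share the same value and entity type."""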
return entity["value"] == other["value"] and entity["entity"] == other["entity"]
async def _enter_user_message(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Request a new message from the user."""
question = questionary.text("Your input ->")
message = await _ask_questions(question, sender_id, endpoint, lambda a: not a)
if message == (INTENT_MESSAGE_PREFIX + constants.USER_INTENT_RESTART):
raise RestartConversation()
await send_message(endpoint, sender_id, message)
async def is_listening_for_message(sender_id: Text, endpoint: EndpointConfig) -> bool:
"""Check if the conversation is in need for a user message."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.APPLIED)
for i, e in enumerate(reversed(tracker.get("events", []))):
if e.get("event") == UserUttered.type_name:
return False
elif e.get("event") == ActionExecuted.type_name:
return e.get("name") == ACTION_LISTEN_NAME
return False
async def _undo_latest(sender_id: Text, endpoint: EndpointConfig) -> None:
"""Undo either the latest bot action or user message, whatever is last."""
tracker = await retrieve_tracker(endpoint, sender_id, EventVerbosity.ALL)
# Get latest `UserUtterance` or `ActionExecuted` event.
last_event_type = None
for i, e in enumerate(reversed(tracker.get("events", []))):
last_event_type = e.get("event")
if last_event_type in {ActionExecuted.type_name, UserUttered.type_name}:
break
elif last_event_type == Restarted.type_name:
break
if last_event_type == ActionExecuted.type_name:
undo_action = ActionReverted().as_dict()
await send_event(endpoint, sender_id, undo_action)
elif last_event_type == UserUttered.type_name:
undo_user_message = UserUtteranceReverted().as_dict()
listen_for_next_message = ActionExecuted(ACTION_LISTEN_NAME).as_dict()
await send_event(
endpoint, sender_id, [undo_user_message, listen_for_next_message]
)
async def _fetch_events(
sender_ids: List[Union[Text, List[Event]]], endpoint: EndpointConfig
) -> List[List[Event]]:
"""Retrieve all event trackers from the endpoint for all sender ids."""
event_sequences = []
for sender_id in sender_ids:
if isinstance(sender_id, str):
tracker = await retrieve_tracker(endpoint, sender_id)
events = tracker.get("events", [])
for conversation in _split_conversation_at_restarts(events):
parsed_events = rasa.core.events.deserialise_events(conversation)
event_sequences.append(parsed_events)
else:
event_sequences.append(sender_id)
return event_sequences
async def _plot_trackers(
sender_ids: List[Union[Text, List[Event]]],
output_file: Optional[Text],
endpoint: EndpointConfig,
unconfirmed: Optional[List[Event]] = None,
):
"""Create a plot of the trackers of the passed sender ids.
This assumes that the last sender id is the conversation we are currently
working on. If there are events that are not part of this active tracker
yet, they can be passed as part of `unconfirmed`. They will be appended
to the currently active conversation."""
if not output_file or not sender_ids:
# if there is no output file provided, we are going to skip plotting
# same happens if there are no sender ids
return None
event_sequences = await _fetch_events(sender_ids, endpoint)
if unconfirmed:
event_sequences[-1].extend(unconfirmed)
graph = await visualize_neighborhood(
event_sequences[-1], event_sequences, output_file=None, max_history=2
)
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, output_file)
def _print_help(skip_visualization: bool) -> None:
"""Print some initial help message for the user."""
if not skip_visualization:
visualization_url = DEFAULT_SERVER_FORMAT.format(
"http", DEFAULT_SERVER_PORT + 1
)
visualization_help = "Visualisation at {}/visualization.html.".format(
visualization_url
)
else:
visualization_help = ""
    rasa.cli.utils.print_success(
        "Bot loaded. {}\n"
        "Type a message and press enter "
        "(press 'Ctrl-c' to exit). "
"".format(visualization_help)
)
async def record_messages(
endpoint: EndpointConfig,
sender_id: Text = UserMessage.DEFAULT_SENDER_ID,
max_message_limit: Optional[int] = None,
stories: Optional[Text] = None,
skip_visualization: bool = False,
):
"""Read messages from the command line and print bot responses."""
try:
try:
domain = await retrieve_domain(endpoint)
except ClientError:
logger.exception(
"Failed to connect to Rasa Core server at '{}'. "
"Is the server running?".format(endpoint.url)
)
return
intents = [next(iter(i)) for i in (domain.get("intents") or [])]
num_messages = 0
if not skip_visualization:
events_including_current_user_id = await _get_tracker_events_to_plot(
domain, stories, sender_id
)
plot_file = DEFAULT_STORY_GRAPH_FILE
await _plot_trackers(events_including_current_user_id, plot_file, endpoint)
else:
# `None` means that future `_plot_trackers` calls will also skip the
# visualization.
plot_file = None
events_including_current_user_id = []
_print_help(skip_visualization)
while not utils.is_limit_reached(num_messages, max_message_limit):
try:
if await is_listening_for_message(sender_id, endpoint):
await _enter_user_message(sender_id, endpoint)
await _validate_nlu(intents, endpoint, sender_id)
await _predict_till_next_listen(
endpoint, sender_id, events_including_current_user_id, plot_file
)
num_messages += 1
except RestartConversation:
await send_event(endpoint, sender_id, Restarted().as_dict())
await send_event(
endpoint, sender_id, ActionExecuted(ACTION_LISTEN_NAME).as_dict()
)
logger.info("Restarted conversation, starting a new one.")
except UndoLastStep:
await _undo_latest(sender_id, endpoint)
await _print_history(sender_id, endpoint)
except ForkTracker:
await _print_history(sender_id, endpoint)
events_fork = await _request_fork_from_user(sender_id, endpoint)
await send_event(endpoint, sender_id, Restarted().as_dict())
if events_fork:
for evt in events_fork:
await send_event(endpoint, sender_id, evt)
logger.info("Restarted conversation at fork.")
await _print_history(sender_id, endpoint)
await _plot_trackers(
events_including_current_user_id, plot_file, endpoint
)
except Abort:
return
except Exception:
logger.exception("An exception occurred while recording messages.")
raise
async def _get_tracker_events_to_plot(
domain: Dict[Text, Any], stories: Optional[Text], sender_id: Text
) -> List[Union[Text, List[Event]]]:
training_trackers = await _get_training_trackers(stories, domain)
number_of_trackers = len(training_trackers)
if number_of_trackers > MAX_NUMBER_OF_TRAINING_STORIES_FOR_VISUALIZATION:
rasa.cli.utils.print_warning(
f"You have {number_of_trackers} different story paths in "
f"your training data. Visualizing them is very resource "
f"consuming. Hence, the visualization will only show the stories "
f"which you created during interactive learning, but not your "
f"training stories."
)
training_trackers = []
training_data_events = [t.events for t in training_trackers]
events_including_current_user_id = training_data_events + [sender_id]
return events_including_current_user_id
async def _get_training_trackers(
stories: Optional[Text], domain: Dict[str, Any]
) -> List[DialogueStateTracker]:
from rasa.core import training
return await training.load_data(
stories,
Domain.from_dict(domain),
augmentation_factor=0,
use_story_concatenation=False,
)
def _serve_application(app: Sanic, stories, skip_visualization) -> Sanic:
"""Start a core server and attach the interactive learning IO."""
endpoint = EndpointConfig(url=DEFAULT_SERVER_URL)
async def run_interactive_io(running_app: Sanic):
"""Small wrapper to shut down the server once cmd io is done."""
await record_messages(
endpoint=endpoint,
stories=stories,
skip_visualization=skip_visualization,
sender_id=uuid.uuid4().hex,
)
logger.info("Killing Sanic server now.")
running_app.stop() # kill the sanic server
app.add_task(run_interactive_io)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT)
return app
def start_visualization(image_path: Text = None) -> None:
"""Add routes to serve the conversation visualization files."""
app = Sanic(__name__)
# noinspection PyUnusedLocal
@app.exception(NotFound)
async def ignore_404s(request, exception):
return response.text("Not found", status=404)
# noinspection PyUnusedLocal
@app.route(VISUALIZATION_TEMPLATE_PATH, methods=["GET"])
def visualisation_html(request):
return response.file(visualization.visualization_html_path())
# noinspection PyUnusedLocal
@app.route("/visualization.dot", methods=["GET"])
def visualisation_png(request):
try:
headers = {"Cache-Control": "no-cache"}
return response.file(os.path.abspath(image_path), headers=headers)
except FileNotFoundError:
return response.text("", 404)
update_sanic_log_level()
app.run(host="0.0.0.0", port=DEFAULT_SERVER_PORT + 1, access_log=False)
# noinspection PyUnusedLocal
async def train_agent_on_start(
args, endpoints, additional_arguments, app, loop
) -> None:
_interpreter = NaturalLanguageInterpreter.create(args.get("nlu"), endpoints.nlu)
model_directory = args.get("out", tempfile.mkdtemp(suffix="_core_model"))
_agent = await train(
args.get("domain"),
args.get("stories"),
model_directory,
_interpreter,
endpoints,
args.get("dump_stories"),
args.get("config")[0],
None,
additional_arguments,
)
app.agent = _agent
async def wait_til_server_is_running(
endpoint, max_retries=30, sleep_between_retries=1
) -> bool:
"""Try to reach the server, retry a couple of times and sleep in between."""
while max_retries:
try:
r = await retrieve_status(endpoint)
logger.info(f"Reached core: {r}")
if not r.get("is_ready"):
# server did not finish loading the agent yet
# in this case, we need to wait till the model trained
# so we might be sleeping for a while...
await asyncio.sleep(sleep_between_retries)
continue
else:
# server is ready to go
return True
except ClientError:
max_retries -= 1
if max_retries:
await asyncio.sleep(sleep_between_retries)
return False
def run_interactive_learning(
stories: Text = None,
skip_visualization: bool = False,
server_args: Dict[Text, Any] = None,
additional_arguments: Dict[Text, Any] = None,
):
"""Start the interactive learning with the model of the agent."""
global SAVE_IN_E2E
server_args = server_args or {}
if server_args.get("nlu_data"):
PATHS["nlu"] = server_args["nlu_data"]
if server_args.get("stories"):
PATHS["stories"] = server_args["stories"]
if server_args.get("domain"):
PATHS["domain"] = server_args["domain"]
SAVE_IN_E2E = server_args["e2e"]
if not skip_visualization:
p = Process(target=start_visualization, args=(DEFAULT_STORY_GRAPH_FILE,))
p.daemon = True
p.start()
else:
p = None
app = run.configure_app(enable_api=True)
endpoints = AvailableEndpoints.read_endpoints(server_args.get("endpoints"))
# before_server_start handlers make sure the agent is loaded before the
# interactive learning IO starts
if server_args.get("model"):
app.register_listener(
partial(run.load_agent_on_start, server_args.get("model"), endpoints, None),
"before_server_start",
)
else:
app.register_listener(
partial(train_agent_on_start, server_args, endpoints, additional_arguments),
"before_server_start",
)
_serve_application(app, stories, skip_visualization)
if not skip_visualization and p is not None:
p.terminate() # pytype: disable=attribute-error
p.join() # pytype: disable=attribute-error
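# Minimal usage sketch; the model path and stories file below are hypothetical
# placeholders. In practice these arguments are supplied by the
# `rasa interactive` CLI entry point that wraps this module.
if __name__ == "__main__":
    run_interactive_learning(
        stories="data/stories.md",  # hypothetical stories file
        skip_visualization=True,
        server_args={
            "model": "models/20200101-000000.tar.gz",  # hypothetical trained model
            "endpoints": None,
            "e2e": False,
        },
        additional_arguments={},
    )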
|
the-stack_0_18707 | from threading import RLock
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple
from triad import to_uuid
from tune.concepts.flow import (
Monitor,
Trial,
TrialDecision,
TrialJudge,
TrialReport,
TrialReportHeap,
)
class RungHeap:
def __init__(self, n: int):
self._lock = RLock()
self._n = n
self._heap = TrialReportHeap(min_heap=False)
self._bests: List[float] = []
def __len__(self) -> int:
with self._lock:
return len(self._heap)
@property
def capacity(self) -> int:
return self._n
@property
def best(self) -> float:
with self._lock:
return self._bests[-1] if len(self._bests) > 0 else float("nan")
@property
def bests(self) -> List[float]:
with self._lock:
return self._bests
@property
def full(self) -> bool:
with self._lock:
return self.capacity <= len(self)
def __contains__(self, tid: str) -> bool:
with self._lock:
return tid in self._heap
def values(self) -> Iterable[TrialReport]:
return self._heap.values()
def push(self, report: TrialReport) -> bool:
with self._lock:
if len(self) == 0:
best = report.sort_metric
else:
best = min(self.best, report.sort_metric)
self._heap.push(report)
self._bests.append(best)
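            # The pushed report keeps a slot if the rung is still below its
            # capacity, or if popping from the heap (ordered so the worst
            # report comes out first) removes a different trial than the one
            # just pushed.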
return (
len(self._heap) <= self._n
or self._heap.pop().trial_id != report.trial_id
)
class _PerTrial:
def __init__(self, parent: "_PerPartition") -> None:
self._history: List[TrialReport] = []
self._parent = parent
self._active = True
def can_promote(self, report: TrialReport) -> Tuple[bool, str]:
reasons: List[str] = []
if self._active:
can_accept = self._parent.can_accept(report.trial)
early_stop = self._parent._parent._trial_early_stop(
report, self._history, self._parent._rungs
)
self._active = can_accept and not early_stop
if not can_accept:
reasons.append("can't accept new")
if early_stop:
reasons.append("trial early stop")
if self._active:
self._history.append(report)
can_push = self._parent._rungs[report.rung].push(report)
if not can_push:
# data = sorted(
# (x for x in self._parent._rungs[report.rung].values()),
# key=lambda x: x["sort_metric"],
# )
# reasons.append("not best: " + json.dumps(data))
reasons.append("not best")
return can_push, ", ".join(reasons)
return False, ", ".join(reasons)
def judge(self, report: TrialReport) -> TrialDecision:
if report.rung >= len(self._parent._parent.schedule) - 1:
self._history.append(report)
self._parent._rungs[report.rung].push(report)
return TrialDecision(
report, budget=0, should_checkpoint=True, reason="last"
)
promote, reason = self.can_promote(report)
if not promote:
return TrialDecision(
report, budget=0, should_checkpoint=True, reason=reason
)
next_budget = self._parent.get_budget(report.trial, report.rung + 1)
return TrialDecision(
report,
budget=next_budget,
should_checkpoint=next_budget <= 0
or self._parent._parent.always_checkpoint,
reason="" if next_budget > 0 else "budget==0",
)
class _PerPartition:
def __init__(self, parent: "ASHAJudge", keys: List[Any]):
self._keys = keys
self._data: Dict[str, _PerTrial] = {}
self._lock = RLock()
self._parent = parent
self._rungs: List[RungHeap] = [RungHeap(x[1]) for x in self._parent.schedule]
self._active = True
self._accepted_ids: Set[str] = set()
def can_accept(self, trial: Trial) -> bool:
with self._lock:
if self._active:
self._active = not self._parent._study_early_stop(
self._keys, self._rungs
)
if self._active:
self._accepted_ids.add(trial.trial_id)
return True
# if not active, can only accept existing trials
return trial.trial_id in self._accepted_ids
def get_budget(self, trial: Trial, rung: int) -> float:
if rung >= len(self._parent.schedule) or not self.can_accept(trial):
return 0.0 # pragma: no cover
return self._parent.schedule[rung][0]
def judge(self, report: TrialReport) -> TrialDecision:
return self._get_judge(report.trial).judge(report)
def _get_judge(self, trial: Trial) -> _PerTrial:
key = trial.trial_id
with self._lock:
if key not in self._data:
self._data[key] = _PerTrial(self)
return self._data[key]
class ASHAJudge(TrialJudge):
def __init__(
self,
schedule: List[Tuple[float, int]],
always_checkpoint: bool = False,
study_early_stop: Optional[Callable[[List[Any], List[RungHeap]], bool]] = None,
trial_early_stop: Optional[
Callable[[TrialReport, List[TrialReport], List[RungHeap]], bool]
] = None,
monitor: Optional[Monitor] = None,
):
super().__init__(monitor=monitor)
self._lock = RLock()
self._data: Dict[str, _PerPartition] = {}
self._schedule = schedule
self._always_checkpoint = always_checkpoint
self._study_early_stop = study_early_stop or _default_study_early_stop
self._trial_early_stop = trial_early_stop or _default_trial_early_stop
@property
def schedule(self) -> List[Tuple[float, int]]:
return self._schedule
@property
def always_checkpoint(self) -> bool:
return self._always_checkpoint
def can_accept(self, trial: Trial) -> bool:
return self._get_judge(trial).can_accept(trial)
def get_budget(self, trial: Trial, rung: int) -> float:
budget = self._get_judge(trial).get_budget(trial, rung)
self.monitor.on_get_budget(trial, rung, budget)
return budget
def judge(self, report: TrialReport) -> TrialDecision:
self.monitor.on_report(report)
decision = self._get_judge(report.trial).judge(report)
self.monitor.on_judge(decision)
return decision
def _get_judge(self, trial: Trial) -> _PerPartition:
key = to_uuid(trial.keys)
with self._lock:
if key not in self._data:
self._data[key] = _PerPartition(self, trial.keys)
return self._data[key]
def _default_study_early_stop(keys: List[Any], rungs: List["RungHeap"]) -> bool:
return all(r.full for r in rungs)
def _default_trial_early_stop(
report: TrialReport, reports: List[TrialReport], rungs: List["RungHeap"]
) -> bool:
return False
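# Minimal usage sketch of ASHAJudge; the schedule below is illustrative. Each
# entry is a (budget, rung capacity) pair, matching how `_PerPartition` builds
# one `RungHeap(capacity)` per schedule entry: later rungs get more budget but
# keep fewer trials.
if __name__ == "__main__":
    judge = ASHAJudge(schedule=[(1.0, 8), (2.0, 4), (4.0, 2)])
    print(judge.schedule)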
|
the-stack_0_18708 | # def found_symbol(construction):
# n = construction
# matrix = []
# for _ in range(n):
# matrix.append([ch for ch in input()])
# symbol = input()
# found = False
# for i in range(n):
# if found:
# break
# for j in range(n):
# if matrix[i][j] == symbol:
# print(f'({i}, {j})')
# found = True
# break
# if not found:
# print(f"{symbol} does not occur in the matrix")
#
#
# found_symbol(int(input()))
def read_matrix(n):
matrix = []
for _ in range(n):
matrix.append([ch for ch in input()])
return matrix
def find_char(matrix, char):
for i in range(len(matrix)):
for j in range(len(matrix[0])):
current_char = matrix[i][j]
if current_char == char:
return (i, j)
return f'{char} does not occur in the matrix'
matrix = read_matrix(int(input()))
print(find_char(matrix, input()))
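# Example run (sketch): given the stdin below
#   3
#   ABC
#   DEF
#   GHI
#   F
# read_matrix builds [['A', 'B', 'C'], ['D', 'E', 'F'], ['G', 'H', 'I']] and
# find_char returns the first match scanning rows top to bottom: (1, 2).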
|
the-stack_0_18709 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -alertnotify, -blocknotify and -walletnotify options."""
import os
from test_framework.test_framework import IndianCoinTestFramework
from test_framework.util import assert_equal, wait_until, connect_nodes_bi
class NotificationsTest(IndianCoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def setup_network(self):
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
self.block_filename = os.path.join(self.options.tmpdir, "blocks.txt")
self.tx_filename = os.path.join(self.options.tmpdir, "transactions.txt")
# -alertnotify and -blocknotify on node0, walletnotify on node1
self.extra_args = [["-blockversion=4",
"-alertnotify=echo %%s >> %s" % self.alert_filename,
"-blocknotify=echo %%s >> %s" % self.block_filename],
["-blockversion=211",
"-rescan",
"-walletnotify=echo %%s >> %s" % self.tx_filename]]
super().setup_network()
def run_test(self):
self.log.info("test -blocknotify")
block_count = 10
blocks = self.nodes[1].generate(block_count)
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.block_filename) and os.stat(self.block_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated blocks hashes
with open(self.block_filename, 'r') as f:
assert_equal(sorted(blocks), sorted(f.read().splitlines()))
self.log.info("test -walletnotify")
# wait at most 10 seconds for expected file size before reading the content
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
os.remove(self.tx_filename)
self.log.info("test -walletnotify after rescan")
# restart node to rescan to force wallet notifications
self.restart_node(1)
connect_nodes_bi(self.nodes, 0, 1)
wait_until(lambda: os.path.isfile(self.tx_filename) and os.stat(self.tx_filename).st_size >= (block_count * 65), timeout=10)
# file content should equal the generated transaction hashes
txids_rpc = list(map(lambda t: t['txid'], self.nodes[1].listtransactions("*", block_count)))
with open(self.tx_filename, 'r') as f:
assert_equal(sorted(txids_rpc), sorted(f.read().splitlines()))
        # Mine another 51 up-version blocks. -alertnotify should trigger on the 51st.
self.log.info("test -alertnotify")
self.nodes[1].generate(51)
self.sync_all()
# Give indiancoind 10 seconds to write the alert notification
wait_until(lambda: os.path.isfile(self.alert_filename) and os.path.getsize(self.alert_filename), timeout=10)
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text = f.read()
# Mine more up-version blocks, should not get more alerts:
self.nodes[1].generate(2)
self.sync_all()
with open(self.alert_filename, 'r', encoding='utf8') as f:
alert_text2 = f.read()
self.log.info("-alertnotify should not continue notifying for more unknown version blocks")
assert_equal(alert_text, alert_text2)
if __name__ == '__main__':
NotificationsTest().main()
|
the-stack_0_18712 | import json
import re
import time
from datetime import datetime
import requests
import urllib3
from kakao.common import pretty_print, close
urllib3.disable_warnings()
header_map = {
"Accept": "application/json, text/plain, */*",
"Content-Type": "application/json;charset=utf-8",
"Origin": "https://vaccine-map.kakao.com",
"Accept-Language": "en-us",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 KAKAOTALK 9.4.2",
"Referer": "https://vaccine-map.kakao.com/",
"Accept-Encoding": "gzip, deflate",
"Connection": "Keep-Alive",
"Keep-Alive": "timeout=5, max=1000"
}
headers_vaccine = {
"Accept": "application/json, text/plain, */*",
"Content-Type": "application/json;charset=utf-8",
"Origin": "https://vaccine.kakao.com",
"Accept-Language": "en-us",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 14_7 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 KAKAOTALK 9.4.2",
"Referer": "https://vaccine.kakao.com/",
"Accept-Encoding": "gzip, deflate",
"Connection": "Keep-Alive",
"Keep-Alive": "timeout=5, max=1000"
}
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,too-many-arguments
def find_vaccine(cookie, search_time, vaccine_type, top_x, top_y, bottom_x, bottom_y, only_left):
url = 'https://vaccine-map.kakao.com/api/v3/vaccine/left_count_by_coords'
data = {"bottomRight": {"x": bottom_x, "y": bottom_y}, "onlyLeft": only_left, "order": "count",
"topLeft": {"x": top_x, "y": top_y}}
done = False
found = None
prevSearch = None
while not done:
try:
time.sleep(search_time)
response = requests.post(url, data=json.dumps(data), headers=header_map, verify=False, timeout=5)
try:
json_data = json.loads(response.text)
for x in list(reversed(json_data.get("organizations"))):
if x.get('status') == "AVAILABLE" or x.get('leftCounts') != 0:
if prevSearch:
prev = list(filter(lambda org: org.get('orgCode') == x.get('orgCode'), prevSearch))
if len(prev) and prev[0].get('leftCounts') == x.get('leftCounts'):
continue
print(f"{x.get('orgName')} 에서 백신을 {x.get('leftCounts')}개 발견했습니다.")
found, target = check_vaccine_availablity(x, vaccine_type, cookie)
if found:
print(f"주소는: {x.get('address')} 입니다.")
done = True
break
else:
print("선택한 백신 종류가 없습니다.")
if not done:
prevSearch = json_data.get("organizations")
pretty_print(json_data)
print(datetime.now())
except json.decoder.JSONDecodeError as decodeerror:
print("JSONDecodeError : ", decodeerror)
print("JSON string : ", response.text)
close()
except requests.exceptions.Timeout as timeouterror:
print("Timeout Error : ", timeouterror)
except requests.exceptions.SSLError as sslerror:
print("SSL Error : ", sslerror)
close()
except requests.exceptions.ConnectionError as connectionerror:
print("Connection Error : ", connectionerror)
# See psf/requests#5430 to know why this is necessary.
if not re.search('Read timed out', str(connectionerror), re.IGNORECASE):
close()
except requests.exceptions.HTTPError as httperror:
print("Http Error : ", httperror)
close()
except requests.exceptions.RequestException as error:
print("AnyException : ", error)
close()
vaccine_found_code = None
if found is None:
find_vaccine(cookie, search_time, vaccine_type, top_x, top_y, bottom_x, bottom_y, only_left)
return None
else:
vaccine_found_code = found.get('vaccineCode')
organization_code = target
if vaccine_found_code and try_reservation(organization_code, vaccine_found_code, cookie):
return None
else:
find_vaccine(cookie, search_time, vaccine_type, top_x, top_y, bottom_x, bottom_y, only_left)
return None
def check_vaccine_availablity(data, vaccine_type, cookie):
check_organization_url = f'https://vaccine.kakao.com/api/v3/org/org_code/{data.get("orgCode")}'
check_organization_response = requests.get(check_organization_url, headers=headers_vaccine, cookies=cookie, verify=False)
check_organization_data = json.loads(check_organization_response.text).get("lefts")
for x in vaccine_type:
find = list(filter(lambda v: v.get('vaccineCode') == x and v.get('leftCount') != 0, check_organization_data))
if len(find):
print(f"{find[0].get('vaccineName')} {find[0].get('leftCount')}개가 있습니다.")
return [find[0], data.get("orgCode")]
return [False, False]
def try_reservation(organization_code, vaccine_type, jar):
reservation_url = 'https://vaccine.kakao.com/api/v2/reservation'
data = {"from": "List", "vaccineCode": vaccine_type,
"orgCode": organization_code, "distance": None}
response = requests.post(reservation_url, data=json.dumps(data), headers=headers_vaccine, cookies=jar, verify=False)
response_json = json.loads(response.text)
if response_json.get('error'):
print("사용자 정보를 불러오는데 실패하였습니다.")
print("Chrome 브라우저에서 카카오에 제대로 로그인되어있는지 확인해주세요.")
close()
else:
reservation_status = response_json['code']
if reservation_status == "NO_VACANCY":
print("잔여백신 접종 신청이 선착순 마감되었습니다.")
retry_reservation(organization_code, vaccine_type, jar)
elif reservation_status == "TIMEOUT":
print("TIMEOUT, 예약을 재시도합니다.")
retry_reservation(organization_code, vaccine_type, jar)
elif reservation_status == "SUCCESS":
print("백신접종신청 성공!!!")
organization_code_success = response_json.get("organization")
print(
f"병원이름: {organization_code_success.get('orgName')}\t" +
f"전화번호: {organization_code_success.get('phoneNumber')}\t" +
f"주소: {organization_code_success.get('address')}")
close(success=True)
else:
print("ERROR. 아래 메시지를 보고, 예약이 신청된 병원 또는 1339에 예약이 되었는지 확인해보세요.")
print(response.text)
close()
def retry_reservation(organization_code, vaccine_type, jar):
reservation_url = 'https://vaccine.kakao.com/api/v2/reservation/retry'
data = {"from": "List", "vaccineCode": vaccine_type,
"orgCode": organization_code, "distance": None}
response = requests.post(reservation_url, data=json.dumps(data), headers=headers_vaccine, cookies=jar, verify=False)
response_json = json.loads(response.text)
if response_json.get('error'):
print("사용자 정보를 불러오는데 실패하였습니다.")
print("Chrome 브라우저에서 카카오에 제대로 로그인되어있는지 확인해주세요.")
close()
else:
reservation_status = response_json['code']
if reservation_status == "NO_VACANCY":
print("잔여백신 접종 신청이 선착순 마감되었습니다.")
elif reservation_status == "SUCCESS":
print("백신접종신청 성공!!!")
organization_code_success = response_json.get("organization")
print(
f"병원이름: {organization_code_success.get('orgName')}\t" +
f"전화번호: {organization_code_success.get('phoneNumber')}\t" +
f"주소: {organization_code_success.get('address')}")
close(success=True)
else:
print("ERROR. 아래 메시지를 보고, 예약이 신청된 병원 또는 1339에 예약이 되었는지 확인해보세요.")
print(response.text)
close()
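# Minimal usage sketch; the cookie contents, vaccine codes and map coordinates
# below are hypothetical placeholders. Real values come from a logged-in Kakao
# session and the map area to be searched.
if __name__ == "__main__":
    cookie = {"_kadu": "placeholder-session-cookie"}  # hypothetical cookie jar
    find_vaccine(
        cookie,
        search_time=0.2,                      # seconds to sleep between polls
        vaccine_type=["ANY"],                 # hypothetical vaccine code list
        top_x="126.90", top_y="37.57",        # top-left corner (lon/lat, hypothetical)
        bottom_x="127.00", bottom_y="37.50",  # bottom-right corner (hypothetical)
        only_left=False,
    )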
|
the-stack_0_18714 | '''Eyes wiki db module
'''
import sqlalchemy as sa
from sqlalchemy_utils.types import ChoiceType
from eyes.db import Base
from eyes.type import Label
class WikiEntity(Base):
'''Wiki entities
'''
__tablename__ = 'wiki_entities'
id = sa.Column(
sa.Integer,
primary_key=True,
autoincrement=True,
)
name = sa.Column(
sa.String(32),
unique=True,
nullable=False,
)
type = sa.Column(sa.String(32))
label = sa.Column(ChoiceType(
Label,
impl=sa.Integer(),
))
alias = sa.Column(
sa.JSON,
        # a callable gives each new row its own fresh list as the default value
        default=lambda: [],
)
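# Minimal usage sketch; `Label.NEUTRAL` and the in-memory engine are
# hypothetical placeholders:
#
#     from sqlalchemy.orm import Session
#     engine = sa.create_engine("sqlite://")
#     Base.metadata.create_all(engine)
#     with Session(engine) as session:
#         session.add(WikiEntity(name="Acme", type="company",
#                                label=Label.NEUTRAL, alias=["ACME Corp"]))
#         session.commit()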
|
the-stack_0_18715 | import pandas as pd
import re
import joblib as jb
import numpy as np
import json
from scipy.sparse import hstack, csr_matrix
# <================================================= MODEL =====================================================>
#Load models
model_rf = jb.load('model/model_rf.pk.z')
model_lgbm = jb.load('model/model_lgbm.pk.z')
# <===================================================== Predictions =========================================================>
def prediction(data):
feature_array = clean_data(data)
if feature_array is None:
return 0
pred_rf = model_rf.predict_proba(feature_array)[0][1]
pred_lgbm = model_lgbm.predict_proba(feature_array)[0][1]
pred = 0.2*pred_rf + 0.8*pred_lgbm
return pred
# <================================================= Cleaning the data =======================================>
def clean_data(data):
cols = ['price','regdate','mileage', 'vidro_elétrico','air_bag',
'trava_elétrica','ar_condicionado','direção_hidráulica','alarme','som',
'sensor_de_ré', 'financial_com_multas','financial_de_leilão','financial_financiado','financial_ipva_pago',
'brand_ford','brand_gmchevrolet', 'brand_vwvolkswagen','model_agile','model_captiva',
'model_celta', 'model_cobalt','model_compass','model_corsa','model_crossfox',
'model_cruze', 'model_focus','model_fox', 'model_gol','model_golf',
'model_grandsaveiro','model_jetta','model_joy','model_ka','model_parati',
'model_passat','model_polo','model_prisma','model_ranger','model_s10',
'model_sandero','model_saveiro', 'car_steering_assistida','car_steering_elétrica','car_steering_hidráulica','car_steering_mecnica']
clean_df = pd.DataFrame(columns = cols, index = [0])
clean_df = clean_price(data, clean_df)
clean_df = clean_regdate(data, clean_df)
clean_df = clean_mileage(data, clean_df)
clean_df = clean_model(data, clean_df)
#clean_df = clean_steering(data, clean_df)
clean_df = clean_extra(data, clean_df)
clean_df = clean_financial(data, clean_df)
clean_df = clean_brand(data,clean_df)
if any(clean_df.isnull().iloc[0]):
return None
feature_array = clean_df.iloc[0].to_numpy()
    # reshape into the 46-feature row order expected by the models
feature_array = feature_array.reshape(-1,46)
return feature_array
# <================================================= clean_price =====================================================>
def clean_price(data, clean_df):
if data['price'] =='':
clean_df['price'] = None
else:
numeric_price = int(data['price'])
clean_df['price'] = numeric_price
return clean_df
# <================================================= clean_price =====================================================>
def clean_regdate(data, clean_df):
if data['regdate'] =='0' or data['regdate'] =='' :
clean_df['regdate'] = None
else:
numeric_regdate = int(data['regdate'])
clean_df['regdate'] = numeric_regdate
return clean_df
# <================================================= clean_mileage =====================================================>
def clean_mileage(data, clean_df):
if data['mileage'] =='0':
clean_df['mileage'] = None
else:
numeric_mileage = int(data['mileage'])
clean_df['mileage'] = numeric_mileage
return clean_df
# <================================================ clean_model =====================================================>
def clean_model(data, clean_df):
models = ['agile','captiva','celta', 'cobalt','compass','corsa','crossfox','cruze', 'focus','fox','gol','golf','grandsaveiro',
'jetta','joy','ka', 'parati','passat','polo','prisma','ranger','s10',
'sandero','saveiro']
for model in models:
clean_df['model_'+ model] = np.where(data['model'] ==model,1,0)
return clean_df
# <================================================= clean_steering =====================================================>
#def clean_steering(data, clean_df):
#steerings = ['car_steering_assistida','car_steering_elétrica','car_steering_hidráulica','car_steering_mecnica']
#for steering in steerings:
#clean_df['car_steering_' + steering] = np.where(data['steering'] ==steering,1,0)
#return clean_df
# <================================================= clean_extra =====================================================>
def clean_extra(data, clean_df):
extras= ['vidro_elétrico',
'air_bag','trava_elétrica','ar_condicionado','direção_hidráulica','alarme',
'som','sensor_de_ré', 'car_steering_assistida','car_steering_elétrica','car_steering_hidráulica','car_steering_mecnica']
for extra in extras:
clean_df[extra] = np.where(data['extra'] ==extra,1,0)
return clean_df
# <================================================= clean_financial =====================================================>
def clean_financial(data,clean_df):
financials = [ 'com_multas','de_leilão',
'financiado','ipva_pago']
for financial in financials:
clean_df['financial_'+financial] = np.where(data['financial'] == financial,1,0)
return clean_df
# <================================================= clean_brand =====================================================>
def clean_brand(data,clean_df):
brands = [ 'ford','gmchevrolet', 'vwvolkswagen']
for brand in brands:
clean_df['brand_'+brand] = np.where(data['brand'] == brand,1,0)
return clean_df
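# Minimal usage sketch; the field values below are hypothetical placeholders,
# but each key matches what the cleaning functions read, and the numeric
# fields are strings because clean_price/clean_regdate/clean_mileage cast
# them with int().
if __name__ == "__main__":
    sample = {
        "price": "35000",
        "regdate": "2015",
        "mileage": "60000",
        "model": "gol",
        "extra": "ar_condicionado",
        "financial": "ipva_pago",
        "brand": "vwvolkswagen",
    }
    print(prediction(sample))  # weighted blend: 0.2 * RF + 0.8 * LGBM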
|
the-stack_0_18716 | #!/usr/bin/env python3
# Heresh Fattahi
#
import numpy as np
import argparse
import os
import glob
import isce
import isceobj
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
#import s1a_isce_utils as ut
from isceobj.Planet.Planet import Planet
import shelve
GDAL2NUMPY_DATATYPE = {
1 : np.uint8,
2 : np.uint16,
3 : np.int16,
4 : np.uint32,
5 : np.int32,
6 : np.float32,
7 : np.float64,
10: np.complex64,
11: np.complex128,
}
def createParser():
'''
Command line parser.
'''
parser = argparse.ArgumentParser( description='filters the densOffset, oversamples it and adds back to the geometry offset')
parser.add_argument('-i', '--input_directory', dest='input', type=str, default=None,
help='The directory which contains all pairs (e.g.: ~/hfattahi/process/testSentinel/merged/interferograms). ')
parser.add_argument('-f', '--file_list', nargs = '+', dest='fileList', type=str, default=None,
help='A list of files that will be used in pysar e.g.: filt_fine.unw filt_fine.cor')
parser.add_argument('-o', '--orbit_direction', dest='orbitDirection', type=str, default=None,
help='Direction of the orbit: ascending, or descending ')
parser.add_argument('-s', '--shelve_dir', dest='shelveDir', type=str, default=None,
help='A directory that contains a shelve file to extract common metada for the stack: e.g.: ')
parser.add_argument('-b', '--baseline_dir', dest='baselineDir', type=str, default=None,
help=' directory with baselines ')
parser.add_argument('-g', '--geometry_dir', dest='geometryDir', type=str, default=None,
help=' directory with geometry files ')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
return parser.parse_args(args=iargs)
def extractIsceMetadata(shelveFile):
with shelve.open(shelveFile, flag='r') as mdb:
burst = mdb['frame']
#reference = ut.loadProduct(shelveFile)
#burst = reference.bursts[0]
#burstEnd = reference.bursts[-1]
metadata = {}
metadata['radarWavelength'] = burst.radarWavelegth
metadata['rangePixelSize'] = burst.instrument.rangePixelSize
metadata['prf'] = burst.PRF
metadata['startUTC'] = burst.sensingStart
metadata['stopUTC'] = burst.sensingStop
metadata['startingRange'] = burst.startingRange
time_seconds = burst.sensingStart.hour*3600.0 + burst.sensingStart.minute*60.0 + burst.sensingStart.second
metadata['CENTER_LINE_UTC'] = time_seconds
Vs = np.linalg.norm(burst.orbit.interpolateOrbit(burst.sensingMid, method='hermite').getVelocity())
metadata['satelliteSpeed'] = Vs
metadata['azimuthTimeInterval'] = 1./burst.PRF #azimuthTimeInterval
metadata['azimuthPixelSize'] = Vs*metadata['azimuthTimeInterval']#burst.azimuthTimeInterval
#metadata['azimuthPixelSize'] = burst.instrument.azimuthPixelSize
tstart = burst.sensingStart
tend = burst.sensingStop
tmid = tstart + 0.5*(tend - tstart)
orbit = burst.orbit
peg = orbit.interpolateOrbit(tmid, method='hermite')
refElp = Planet(pname='Earth').ellipsoid
llh = refElp.xyz_to_llh(peg.getPosition())
hdg = orbit.getENUHeading(tmid)
refElp.setSCH(llh[0], llh[1], hdg)
metadata['earthRadius'] = refElp.pegRadCur
metadata['altitude'] = llh[2]
return metadata
def write_rsc(isceFile, dates, metadata, baselineDict):
rscDict={}
rscDict['WIDTH'] = metadata['width']
#rscDict['X_FIRST'] =
#rscDict['X_STEP'] =
#rscDict['X_UNIT'] =
rscDict['FILE_LENGTH'] = metadata['length']
#rscDict['Y_FIRST'] =
#rscDict['Y_STEP'] =
#rscDict['Y_UNIT'] =
rscDict['WAVELENGTH'] = metadata['radarWavelength']
rscDict['DATE12'] = dates[0][2:] + '-' + dates[1][2:]
#rscDict['DATE'] = dates[0]
rscDict['PLATFORM'] = 'Sentinel1'
rscDict['RANGE_PIXEL_SIZE'] = metadata['rangePixelSize']
rscDict['AZIMUTH_PIXEL_SIZE'] = metadata['azimuthPixelSize']
rscDict['EARTH_RADIUS'] = metadata['earthRadius']
rscDict['CENTER_LINE_UTC'] = metadata['CENTER_LINE_UTC']
rscDict['HEIGHT'] = metadata['altitude']
rscDict['STARTING_RANGE'] = metadata['startingRange']
rscDict['STARTING_RANGE1'] = metadata['startingRange']
#rscDict['HEADING'] =
#rscDict['LOOK_REF1']=
#rscDict['LOOK_REF2'] =
#rscDict['LAT_REF1'] =
#rscDict['LON_REF1'] =
#rscDict['LAT_REF2'] =
#rscDict['LON_REF2'] =
#rscDict['LAT_REF3'] =
#rscDict['LON_REF3'] =
#rscDict['LAT_REF4'] =
#rscDict['LON_REF4'] =
#rscDict['PRF'] =
rscDict['ANTENNA_SIDE'] = -1
#rscDict['HEADING'] =
rscDict['ORBIT_DIRECTION'] = metadata['orbitDirection']
rscDict['PROCESSOR'] = 'isce'
outname = isceFile + '.rsc'
print('writing ', outname)
f = open(outname,'w')
for key in rscDict.keys():
f.write(key+' ' + str(rscDict[key]) +'\n')
f.close()
outBaselineName = os.path.join(os.path.dirname(isceFile), dates[0][2:] + '_' + dates[1][2:] + '_baseline.rsc')
f = open(outBaselineName,'w')
f.write("P_BASELINE_TOP_HDR " + str(baselineDict[dates[1]] - baselineDict[dates[0]]) + '\n')
f.write("P_BASELINE_BOTTOM_HDR " + str(baselineDict[dates[1]] - baselineDict[dates[0]]) + '\n')
f.close()
return None
def prepare_stack(inputDir, filePattern, metadata, baselineDict):
unwDirs = glob.glob(os.path.join(inputDir,'*/'+filePattern))
isceFile = unwDirs[0]
ds = gdal.Open(isceFile, gdal.GA_ReadOnly)
length = ds.RasterYSize
width = ds.RasterXSize
metadata['length'] = length
metadata['width'] = width
for isceFile in unwDirs:
dirname = os.path.dirname(isceFile)
dates = os.path.basename(dirname).split('_')
write_rsc(isceFile, dates, metadata, baselineDict)
#cmd = "mv " + isceFile + " " + os.path.join(os.path.dirname(isceFile) , "filt_" + dates[0][2:] + '_' + dates[1][2:] + "." + filePattern.split(".")[-1])
#print(cmd)
#os.system(cmd)
#cmd = "mv " + isceFile + ".rsc " + os.path.join(os.path.dirname(isceFile) , "filt_" + dates[0][2:] + '_' + dates[1][2:] + "." + filePattern.split(".")[-1] + ".rsc")
#os.system(cmd)
def read_baseline(baselineFile):
b=[]
#bDict = dict(np.loadtxt(baselineFile, dtype=str, usecols=(0,1)))
f = open(baselineFile)
for line in f:
l = line.split()
b.append(float(l[1]))
# if l[0] == "Bperp (average)":
# b.append(float(l[1]))
return np.mean(b)
#print(bDict)
#return (bDict['PERP_BASELINE_BOTTOM']+bDict['PERP_BASELINE_TOP'])/2.0
def baselineTimeseries(baselineDir):
bFiles = glob.glob(os.path.join(baselineDir,'*.txt'))
bFiles = sorted(bFiles)
bDict={}
for bFile in bFiles:
dates = os.path.basename(bFile).split('.txt')[0].split('_')
bDict[dates[1]] = read_baseline(bFile)
bDict[dates[0]] = 0
return bDict
def prepare_geometry(geometryDir):
demFile = os.path.join(geometryDir, 'hgt.rdr')
latFile = os.path.join(geometryDir, 'lat.rdr')
lonFile = os.path.join(geometryDir, 'lon.rdr')
ds = gdal.Open(demFile, gdal.GA_ReadOnly)
length = ds.RasterYSize
width = ds.RasterXSize
lat = np.memmap(latFile, dtype=np.float64, mode='r', shape=(length,width))
    lon = np.memmap(lonFile, dtype=np.float64, mode='r', shape=(length,width))
print(lat[0,0], lat[0,width-1], lat[length-1,0], lat[length-1,width-1])
print(lon[0,0], lon[0,width-1], lon[length-1,0], lon[length-1,width-1])
lat = None
lon = None
def main(iargs=None):
inps = cmdLineParse(iargs)
baselineDict = baselineTimeseries(inps.baselineDir)
metadata = extractIsceMetadata(os.path.join(inps.shelveDir, 'data'))
metadata['orbitDirection'] = inps.orbitDirection
for namePattern in inps.fileList:
print(namePattern)
prepare_stack(inps.input, namePattern, metadata, baselineDict)
#prepare_geometry(inps.geometryDir)
if __name__ == '__main__':
'''
Main driver.
'''
main()
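# Example invocation (sketch; the script name and directory layout are hypothetical):
#   python prepare_stack.py \
#       -i merged/interferograms \
#       -f filt_fine.unw filt_fine.cor \
#       -o ascending \
#       -s referenceShelve \
#       -b baselines \
#       -g merged/geom_reference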
|
the-stack_0_18717 | # -*- coding=utf-8 -*-
import atexit
import contextlib
import copy
import functools
import os
from pipenv.vendor import attr
import packaging.markers
import packaging.version
import pip_shims.shims
import requests
from packaging.utils import canonicalize_name
from vistir.compat import JSONDecodeError, fs_str
from vistir.contextmanagers import cd, temp_environ
from vistir.path import create_tracked_tempdir
from ..environment import MYPY_RUNNING
from ..utils import _ensure_dir, prepare_pip_source_args
from .cache import CACHE_DIR, DependencyCache
from .setup_info import SetupInfo
from .utils import (
clean_requires_python,
fix_requires_python_marker,
format_requirement,
full_groupby,
is_pinned_requirement,
key_from_ireq,
make_install_requirement,
name_from_req,
version_from_ireq,
)
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
if MYPY_RUNNING:
from typing import (
Any,
Dict,
List,
Generator,
Optional,
Union,
Tuple,
TypeVar,
Text,
Set,
)
from pip_shims.shims import (
InstallRequirement,
InstallationCandidate,
PackageFinder,
Command,
)
from packaging.requirements import Requirement as PackagingRequirement
from packaging.markers import Marker
TRequirement = TypeVar("TRequirement")
RequirementType = TypeVar(
"RequirementType", covariant=True, bound=PackagingRequirement
)
MarkerType = TypeVar("MarkerType", covariant=True, bound=Marker)
STRING_TYPE = Union[str, bytes, Text]
S = TypeVar("S", bytes, str, Text)
PKGS_DOWNLOAD_DIR = fs_str(os.path.join(CACHE_DIR, "pkgs"))
WHEEL_DOWNLOAD_DIR = fs_str(os.path.join(CACHE_DIR, "wheels"))
DEPENDENCY_CACHE = DependencyCache()
@contextlib.contextmanager
def _get_wheel_cache():
with pip_shims.shims.global_tempdir_manager():
yield pip_shims.shims.WheelCache(
CACHE_DIR, pip_shims.shims.FormatControl(set(), set())
)
def _get_filtered_versions(ireq, versions, prereleases):
return set(ireq.specifier.filter(versions, prereleases=prereleases))
def find_all_matches(finder, ireq, pre=False):
# type: (PackageFinder, InstallRequirement, bool) -> List[InstallationCandidate]
"""Find all matching dependencies using the supplied finder and the
given ireq.
:param finder: A package finder for discovering matching candidates.
:type finder: :class:`~pip._internal.index.PackageFinder`
:param ireq: An install requirement.
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A list of matching candidates.
:rtype: list[:class:`~pip._internal.index.InstallationCandidate`]
"""
candidates = clean_requires_python(finder.find_all_candidates(ireq.name))
versions = {candidate.version for candidate in candidates}
allowed_versions = _get_filtered_versions(ireq, versions, pre)
if not pre and not allowed_versions:
allowed_versions = _get_filtered_versions(ireq, versions, True)
candidates = {c for c in candidates if c.version in allowed_versions}
return candidates
def get_pip_command():
# type: () -> Command
# Use pip's parser for pip.conf management and defaults.
# General options (find_links, index_url, extra_index_url, trusted_host,
# and pre) are defered to pip.
pip_command = pip_shims.shims.InstallCommand()
return pip_command
@attr.s
class AbstractDependency(object):
name = attr.ib() # type: STRING_TYPE
specifiers = attr.ib()
markers = attr.ib()
candidates = attr.ib()
requirement = attr.ib()
parent = attr.ib()
finder = attr.ib()
dep_dict = attr.ib(default=attr.Factory(dict))
@property
def version_set(self):
"""Return the set of versions for the candidates in this abstract dependency.
:return: A set of matching versions
:rtype: set(str)
"""
if len(self.candidates) == 1:
return set()
return set(packaging.version.parse(version_from_ireq(c)) for c in self.candidates)
def compatible_versions(self, other):
"""Find compatible version numbers between this abstract
dependency and another one.
:param other: An abstract dependency to compare with.
:type other: :class:`~requirementslib.models.dependency.AbstractDependency`
:return: A set of compatible version strings
:rtype: set(str)
"""
if len(self.candidates) == 1 and next(iter(self.candidates)).editable:
return self
elif len(other.candidates) == 1 and next(iter(other.candidates)).editable:
return other
return self.version_set & other.version_set
def compatible_abstract_dep(self, other):
"""Merge this abstract dependency with another one.
Return the result of the merge as a new abstract dependency.
:param other: An abstract dependency to merge with
:type other: :class:`~requirementslib.models.dependency.AbstractDependency`
:return: A new, combined abstract dependency
:rtype: :class:`~requirementslib.models.dependency.AbstractDependency`
"""
from .requirements import Requirement
if len(self.candidates) == 1 and next(iter(self.candidates)).editable:
return self
elif len(other.candidates) == 1 and next(iter(other.candidates)).editable:
return other
new_specifiers = self.specifiers & other.specifiers
markers = set(self.markers) if self.markers else set()
if other.markers:
markers.add(other.markers)
new_markers = None
if markers:
new_markers = packaging.markers.Marker(
" or ".join(str(m) for m in sorted(markers))
)
new_ireq = copy.deepcopy(self.requirement.ireq)
new_ireq.req.specifier = new_specifiers
new_ireq.req.marker = new_markers
new_requirement = Requirement.from_line(format_requirement(new_ireq))
compatible_versions = self.compatible_versions(other)
if isinstance(compatible_versions, AbstractDependency):
return compatible_versions
candidates = [
c
for c in self.candidates
if packaging.version.parse(version_from_ireq(c)) in compatible_versions
]
dep_dict = {}
candidate_strings = [format_requirement(c) for c in candidates]
for c in candidate_strings:
if c in self.dep_dict:
dep_dict[c] = self.dep_dict.get(c)
return AbstractDependency(
name=self.name,
specifiers=new_specifiers,
markers=new_markers,
candidates=candidates,
requirement=new_requirement,
parent=self.parent,
dep_dict=dep_dict,
finder=self.finder,
)
def get_deps(self, candidate):
"""Get the dependencies of the supplied candidate.
:param candidate: An installrequirement
:type candidate: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A list of abstract dependencies
:rtype: list[:class:`~requirementslib.models.dependency.AbstractDependency`]
"""
key = format_requirement(candidate)
if key not in self.dep_dict:
from .requirements import Requirement
req = Requirement.from_line(key)
req = req.merge_markers(self.markers)
self.dep_dict[key] = req.get_abstract_dependencies()
return self.dep_dict[key]
@classmethod
def from_requirement(cls, requirement, parent=None):
"""Creates a new :class:`~requirementslib.models.dependency.AbstractDependency`
from a :class:`~requirementslib.models.requirements.Requirement` object.
This class is used to find all candidates matching a given set of specifiers
and a given requirement.
:param requirement: A requirement for resolution
:type requirement: :class:`~requirementslib.models.requirements.Requirement` object.
"""
name = requirement.normalized_name
specifiers = requirement.ireq.specifier if not requirement.editable else ""
markers = requirement.ireq.markers
extras = requirement.ireq.extras
is_pinned = is_pinned_requirement(requirement.ireq)
is_constraint = bool(parent)
_, finder = get_finder(sources=None)
candidates = []
if not is_pinned and not requirement.editable:
for r in requirement.find_all_matches(finder=finder):
req = make_install_requirement(
name,
r.version,
extras=extras,
markers=markers,
constraint=is_constraint,
)
req.req.link = getattr(r, "location", getattr(r, "link", None))
req.parent = parent
candidates.append(req)
candidates = sorted(
set(candidates),
key=lambda k: packaging.version.parse(version_from_ireq(k)),
)
else:
candidates = [requirement.ireq]
return cls(
name=name,
specifiers=specifiers,
markers=markers,
candidates=candidates,
requirement=requirement,
parent=parent,
finder=finder,
)
@classmethod
def from_string(cls, line, parent=None):
from .requirements import Requirement
req = Requirement.from_line(line)
abstract_dep = cls.from_requirement(req, parent=parent)
return abstract_dep
def get_abstract_dependencies(reqs, sources=None, parent=None):
"""Get all abstract dependencies for a given list of requirements.
Given a set of requirements, convert each requirement to an Abstract Dependency.
:param reqs: A list of Requirements
:type reqs: list[:class:`~requirementslib.models.requirements.Requirement`]
:param sources: Pipfile-formatted sources, defaults to None
:param sources: list[dict], optional
:param parent: The parent of this list of dependencies, defaults to None
:param parent: :class:`~requirementslib.models.requirements.Requirement`, optional
:return: A list of Abstract Dependencies
:rtype: list[:class:`~requirementslib.models.dependency.AbstractDependency`]
"""
deps = []
from .requirements import Requirement
for req in reqs:
if isinstance(req, pip_shims.shims.InstallRequirement):
requirement = Requirement.from_line("{0}{1}".format(req.name, req.specifier))
if req.link:
requirement.req.link = req.link
requirement.markers = req.markers
requirement.req.markers = req.markers
requirement.extras = req.extras
requirement.req.extras = req.extras
elif isinstance(req, Requirement):
requirement = copy.deepcopy(req)
else:
requirement = Requirement.from_line(req)
dep = AbstractDependency.from_requirement(requirement, parent=parent)
deps.append(dep)
return deps
def get_dependencies(ireq, sources=None, parent=None):
# type: (Union[InstallRequirement, InstallationCandidate], Optional[List[Dict[S, Union[S, bool]]]], Optional[AbstractDependency]) -> Set[S, ...]
"""Get all dependencies for a given install requirement.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:param sources: Pipfile-formatted sources, defaults to None
:type sources: list[dict], optional
:param parent: The parent of this list of dependencies, defaults to None
:type parent: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str)
"""
if not isinstance(ireq, pip_shims.shims.InstallRequirement):
name = getattr(ireq, "project_name", getattr(ireq, "project", ireq.name))
version = getattr(ireq, "version", None)
if not version:
ireq = pip_shims.shims.InstallRequirement.from_line("{0}".format(name))
else:
ireq = pip_shims.shims.InstallRequirement.from_line(
"{0}=={1}".format(name, version)
)
pip_options = get_pip_options(sources=sources)
getters = [
get_dependencies_from_cache,
get_dependencies_from_wheel_cache,
get_dependencies_from_json,
functools.partial(get_dependencies_from_index, pip_options=pip_options),
]
for getter in getters:
deps = getter(ireq)
if deps is not None:
return deps
raise RuntimeError("failed to get dependencies for {}".format(ireq))
def get_dependencies_from_wheel_cache(ireq):
# type: (pip_shims.shims.InstallRequirement) -> Optional[Set[pip_shims.shims.InstallRequirement]]
"""Retrieves dependencies for the given install requirement from the wheel cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
with _get_wheel_cache() as wheel_cache:
matches = wheel_cache.get(ireq.link, name_from_req(ireq.req))
if matches:
matches = set(matches)
if not DEPENDENCY_CACHE.get(ireq):
DEPENDENCY_CACHE[ireq] = [format_requirement(m) for m in matches]
return matches
return None
def _marker_contains_extra(ireq):
# TODO: Implement better parsing logic avoid false-positives.
return "extra" in repr(ireq.markers)
def get_dependencies_from_json(ireq):
"""Retrieves dependencies for the given install requirement from the json api.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
# It is technically possible to parse extras out of the JSON API's
# requirement format, but it is such a chore let's just use the simple API.
if ireq.extras:
return
session = requests.session()
atexit.register(session.close)
version = str(ireq.req.specifier).lstrip("=")
def gen(ireq):
info = None
try:
info = session.get(
"https://pypi.org/pypi/{0}/{1}/json".format(ireq.req.name, version)
).json()["info"]
finally:
session.close()
requires_dist = info.get("requires_dist", info.get("requires"))
if not requires_dist: # The API can return None for this.
return
for requires in requires_dist:
i = pip_shims.shims.InstallRequirement.from_line(requires)
# See above, we don't handle requirements with extras.
if not _marker_contains_extra(i):
yield format_requirement(i)
if ireq not in DEPENDENCY_CACHE:
try:
reqs = DEPENDENCY_CACHE[ireq] = list(gen(ireq))
except JSONDecodeError:
return
req_iter = iter(reqs)
else:
req_iter = gen(ireq)
return set(req_iter)
def get_dependencies_from_cache(ireq):
"""Retrieves dependencies for the given install requirement from the dependency cache.
:param ireq: A single InstallRequirement
:type ireq: :class:`~pip._internal.req.req_install.InstallRequirement`
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
if ireq.editable or not is_pinned_requirement(ireq):
return
if ireq not in DEPENDENCY_CACHE:
return
cached = set(DEPENDENCY_CACHE[ireq])
# Preserving sanity: Run through the cache and make sure every entry if
# valid. If this fails, something is wrong with the cache. Drop it.
try:
broken = False
for line in cached:
dep_ireq = pip_shims.shims.InstallRequirement.from_line(line)
name = canonicalize_name(dep_ireq.name)
if _marker_contains_extra(dep_ireq):
broken = True # The "extra =" marker breaks everything.
elif name == canonicalize_name(ireq.name):
broken = True # A package cannot depend on itself.
if broken:
break
except Exception:
broken = True
if broken:
del DEPENDENCY_CACHE[ireq]
return
return cached
def is_python(section):
return section.startswith("[") and ":" in section
def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache=None):
"""Retrieves dependencies for the given install requirement from the pip resolver.
:param dep: A single InstallRequirement
:type dep: :class:`~pip._internal.req.req_install.InstallRequirement`
:param sources: Pipfile-formatted sources, defaults to None
:type sources: list[dict], optional
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
"""
session, finder = get_finder(sources=sources, pip_options=pip_options)
dep.is_direct = True
requirements = None
setup_requires = {}
with temp_environ(), ExitStack() as stack:
if not wheel_cache:
wheel_cache = stack.enter_context(_get_wheel_cache())
os.environ["PIP_EXISTS_ACTION"] = "i"
if dep.editable and not dep.prepared and not dep.req:
setup_info = SetupInfo.from_ireq(dep)
results = setup_info.get_info()
setup_requires.update(results["setup_requires"])
requirements = set(results["requires"].values())
else:
results = pip_shims.shims.resolve(dep)
requirements = [v for v in results.values() if v.name != dep.name]
requirements = set([format_requirement(r) for r in requirements])
if not dep.editable and is_pinned_requirement(dep) and requirements is not None:
DEPENDENCY_CACHE[dep] = list(requirements)
return requirements
def get_pip_options(args=[], sources=None, pip_command=None):
"""Build a pip command from a list of sources
:param args: positional arguments passed through to the pip parser
:param sources: A list of pipfile-formatted sources, defaults to None
:param sources: list[dict], optional
:param pip_command: A pre-built pip command instance
:type pip_command: :class:`~pip._internal.cli.base_command.Command`
:return: An instance of pip_options using the supplied arguments plus sane defaults
:rtype: :class:`~pip._internal.cli.cmdoptions`
"""
if not pip_command:
pip_command = get_pip_command()
if not sources:
sources = [{"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True}]
_ensure_dir(CACHE_DIR)
pip_args = args
pip_args = prepare_pip_source_args(sources, pip_args)
pip_options, _ = pip_command.parser.parse_args(pip_args)
pip_options.cache_dir = CACHE_DIR
return pip_options
def get_finder(sources=None, pip_command=None, pip_options=None):
# type: (List[Dict[S, Union[S, bool]]], Optional[Command], Any) -> PackageFinder
"""Get a package finder for looking up candidates to install
:param sources: A list of pipfile-formatted sources, defaults to None
:param sources: list[dict], optional
:param pip_command: A pip command instance, defaults to None
:type pip_command: :class:`~pip._internal.cli.base_command.Command`
:param pip_options: A pip options, defaults to None
:type pip_options: :class:`~pip._internal.cli.cmdoptions`
:return: A package finder
:rtype: :class:`~pip._internal.index.PackageFinder`
"""
if not pip_command:
pip_command = pip_shims.shims.InstallCommand()
if not sources:
sources = [{"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True}]
if not pip_options:
pip_options = get_pip_options(sources=sources, pip_command=pip_command)
session = pip_command._build_session(pip_options)
atexit.register(session.close)
finder = pip_shims.shims.get_package_finder(
pip_shims.shims.InstallCommand(), options=pip_options, session=session
)
return session, finder
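# Minimal usage sketch (network access assumed): build a finder against the
# default PyPI source and collect candidates for a requirement.
#
#     session, finder = get_finder()
#     ireq = pip_shims.shims.InstallRequirement.from_line("requests")
#     candidates = find_all_matches(finder, ireq)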
@contextlib.contextmanager
def start_resolver(finder=None, session=None, wheel_cache=None):
"""Context manager to produce a resolver.
:param finder: A package finder to use for searching the index
:type finder: :class:`~pip._internal.index.PackageFinder`
:param :class:`~requests.Session` session: A session instance
:param :class:`~pip._internal.cache.WheelCache` wheel_cache: A pip WheelCache instance
:return: A 3-tuple of finder, preparer, resolver
:rtype: (:class:`~pip._internal.operations.prepare.RequirementPreparer`, :class:`~pip._internal.resolve.Resolver`)
"""
pip_command = get_pip_command()
pip_options = get_pip_options(pip_command=pip_command)
session = None
if not finder:
session, finder = get_finder(pip_command=pip_command, pip_options=pip_options)
if not session:
session = pip_command._build_session(pip_options)
download_dir = PKGS_DOWNLOAD_DIR
_ensure_dir(download_dir)
_build_dir = create_tracked_tempdir(fs_str("build"))
_source_dir = create_tracked_tempdir(fs_str("source"))
try:
with ExitStack() as ctx:
ctx.enter_context(pip_shims.shims.global_tempdir_manager())
if not wheel_cache:
wheel_cache = ctx.enter_context(_get_wheel_cache())
_ensure_dir(fs_str(os.path.join(wheel_cache.cache_dir, "wheels")))
preparer = ctx.enter_context(
pip_shims.shims.make_preparer(
options=pip_options,
finder=finder,
session=session,
build_dir=_build_dir,
src_dir=_source_dir,
download_dir=download_dir,
wheel_download_dir=WHEEL_DOWNLOAD_DIR,
progress_bar="off",
build_isolation=False,
install_cmd=pip_command,
)
)
resolver = pip_shims.shims.get_resolver(
finder=finder,
ignore_dependencies=False,
ignore_requires_python=True,
preparer=preparer,
session=session,
options=pip_options,
install_cmd=pip_command,
wheel_cache=wheel_cache,
force_reinstall=True,
ignore_installed=True,
upgrade_strategy="to-satisfy-only",
isolated=False,
use_user_site=False,
)
yield resolver
finally:
session.close()
def get_grouped_dependencies(constraints):
# We need to track what contributed a specifierset
# as well as which specifiers were required by the root node
# in order to resolve any conflicts when we are deciding which thing to backtrack on
# then we take the loose match (which _is_ flexible) and start moving backwards in
# versions by popping them off of a stack and checking for the conflicting package
for _, ireqs in full_groupby(constraints, key=key_from_ireq):
ireqs = sorted(ireqs, key=lambda ireq: ireq.editable)
editable_ireq = next(iter(ireq for ireq in ireqs if ireq.editable), None)
if editable_ireq:
yield editable_ireq # only the editable match mattters, ignore all others
continue
ireqs = iter(ireqs)
# deepcopy the accumulator so as to not modify the self.our_constraints invariant
combined_ireq = copy.deepcopy(next(ireqs))
for ireq in ireqs:
# NOTE we may be losing some info on dropped reqs here
try:
combined_ireq.req.specifier &= ireq.req.specifier
except TypeError:
if ireq.req.specifier._specs and not combined_ireq.req.specifier._specs:
combined_ireq.req.specifier._specs = ireq.req.specifier._specs
combined_ireq.constraint &= ireq.constraint
if not combined_ireq.markers:
combined_ireq.markers = ireq.markers
else:
_markers = combined_ireq.markers._markers
if not isinstance(_markers[0], (tuple, list)):
combined_ireq.markers._markers = [
_markers,
"and",
ireq.markers._markers,
]
# Return a sorted, de-duped tuple of extras
combined_ireq.extras = tuple(
sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras)))
)
yield combined_ireq
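# Minimal usage sketch: resolve the dependency set of a pinned requirement.
# Network access and a working pip toolchain are assumed; the pinned version
# below is only an example.
if __name__ == "__main__":
    ireq = pip_shims.shims.InstallRequirement.from_line("requests==2.22.0")
    for line in sorted(get_dependencies(ireq)):
        print(line)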
|
the-stack_0_18719 | import ConfigParser
from os import path
import pytest
import yaml
from infrared import api
from infrared.core.services import plugins
import tests
from tests.test_workspace import workspace_manager_fixture, test_workspace # noqa
def subdict_in_dict(subdict, superdict):
"""True is subdict in subdict_in_dict. Else False.
>>> subdict_in_dict({"k1": "v1"}, {"k1": "v1"})
True
>>> subdict_in_dict({"k1": "v1"}, {"k1": "v1", "k2": "v2"})
True
>>> subdict_in_dict({}, {"k1": "v1"})
True
>>> subdict_in_dict({"k1": "v1"}, {})
False
"""
return all(item in superdict.items()
for item in subdict.items())
@pytest.fixture(scope="session")
def spec_fixture():
"""Generates plugin spec for testing, using tests/example plugin dir. """
plugin_dir = path.join(path.abspath(path.dirname(tests.__file__)),
'example')
test_plugin = plugins.InfraredPlugin(plugin_dir=plugin_dir)
from infrared.api import InfraredPluginsSpec
spec = InfraredPluginsSpec(test_plugin)
yield spec
def test_execute_no_workspace(spec_fixture, workspace_manager_fixture): # noqa
"""Verify new workspace was been created when there are no workspaces. """
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
input_string = ['example']
spec_manager.run_specs(args=input_string)
assert workspace_manager_fixture.get_active_workspace()
def test_execute_fail(spec_fixture, workspace_manager_fixture, # noqa
test_workspace):
"""Verify execution fails as expected with CLI input. """
input_string = ['example', "--foo-bar", "fail"]
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
# Assert return code != 0
assert return_value
assert not path.exists(path.join(inventory_dir, output_file))
def test_execute_main(spec_fixture, workspace_manager_fixture, # noqa
test_workspace):
"""Verify execution runs the main.yml playbook.
Implicitly covers that vars dict is passed, since we know it will fail
on task "fail if no vars dict" because test_test_execute_fail verifies
failure is respected and output file isn't generated.
Verifies that plugin roles are invoked properly.
"""
input_string = ['example']
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
assert not path.exists(path.join(inventory_dir, "role_" + output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value == 0
assert path.exists(path.join(inventory_dir, output_file))
assert path.exists(path.join(
inventory_dir,
"role_" + output_file)), "Plugin role not invoked"
def test_fake_inventory(spec_fixture, workspace_manager_fixture, # noqa
test_workspace):
"""Verify "--inventory" updates workspace's inventory. """
input_string = ['example', '--inventory', 'fake']
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
with pytest.raises(IOError) as exc:
spec_manager.run_specs(args=input_string)
assert exc.value.message == "File not found: fake"
def test_bad_user_inventory(spec_fixture, workspace_manager_fixture, # noqa
test_workspace, tmpdir):
"""Verify user-inventory is loaded and not default inventory.
tests/example/main.yml playbook runs on all hosts. New inventory defines
unreachable node.
"""
fake_inventory = tmpdir.mkdir("ir_dir").join("fake_hosts_file")
fake_inventory.write("host2")
test_workspace.inventory = str(fake_inventory)
input_string = ['example', '--inventory', str(fake_inventory)]
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value
@pytest.mark.parametrize("input_value", ["explicit", ""]) # noqa
def test_nested_value_CLI(spec_fixture,
workspace_manager_fixture,
test_workspace, input_value, tmpdir):
"""Tests that CLI input of Complex type Value is nested in vars dict.
Use "-o output_file" and evaluate output YAML file.
"""
dry_output = tmpdir.mkdir("tmp").join("dry_output.yml")
if input_value:
input_string = ['example', '--foo-bar', input_value]
else:
input_string = ['example']
input_string.extend(["-o", str(dry_output)])
# if no input, check that default value is loaded
expected_output_dict = {"foo": {"bar": input_value or "default string"}}
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value == 0
assert path.exists(path.join(inventory_dir, output_file))
output_dict = yaml.load(dry_output.read())["provision"]
# asserts expected_output_dict is subset of output
assert subdict_in_dict(
expected_output_dict,
output_dict), "expected:{} actual:{}".format(expected_output_dict,
output_dict)
@pytest.mark.parametrize("input_args, expected_output_dict", # noqa
[
# No spaces
(["--extra-vars=key=val"],
{"key": "val"}),
# Single var
(["--extra-vars", "key=val"],
{"key": "val"}),
# multiple usage
(["--extra-vars", "key=val",
"-e", "another.key=val1",
"-e", "another.key2=val2"],
{"key": "val",
"another": {"key": "val1",
"key2": "val2"}}),
# nested vars
(["--extra-vars", "nested.key=val"],
{"nested": {"key": "val"}}),
# Mixed with spec input
(["--foo-bar", "val1",
"--extra-vars", "provision.foo.key=val2"],
{"provision": {"foo": {"bar": "val1",
"key": "val2"}}}),
])
def test_extra_vars(spec_fixture,
workspace_manager_fixture,
test_workspace, input_args, expected_output_dict, tmpdir):
"""Tests that "--extra-vars" are inserted to vars_dict. """
dry_output = tmpdir.mkdir("tmp").join("dry_output.yml")
input_string = ['example'] + input_args + ["-o", str(dry_output),
"--dry-run"]
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
# dry run returns None
assert return_value is None
output_dict = yaml.load(dry_output.read())
# asserts expected_output_dict is subset of output
assert subdict_in_dict(
expected_output_dict,
output_dict), "expected:{} actual:{}".format(expected_output_dict,
output_dict)
@pytest.mark.parametrize("input_args, file_dicts, expected_output_dict", # noqa
[
# No spaces
(["--extra-vars=@dict_file"],
[{"filename": "dict_file",
"content": {"key": "val"}}],
{"key": "val"}),
# Single var
(["--extra-vars", "@dict_file"],
[{"filename": "dict_file",
"content": {"key": "val"}}],
{"key": "val"}),
# multiple usage
(["--extra-vars", "key=val",
"-e", "another.key=val1",
"-e", "another.key2=val2",
"--extra-vars", "@dict_file1",
"--extra-vars", "@dict_file2"],
[{"filename": "dict_file1",
"content": {"file-key": "file-val"}},
{"filename": "dict_file2",
"content": {"file-key-list": ["a", "b"]}}],
{"key": "val",
"another": {"key": "val1",
"key2": "val2"},
"file-key": "file-val",
"file-key-list": ["a", "b"]}),
# Mixed with spec input
(["--foo-bar", "val1",
"--extra-vars", "@dict_file"],
[{"filename": "dict_file",
"content":
{"provision": {"foo": {"key": "val2"}}}}],
{"provision": {"foo": {"bar": "val1",
"key": "val2"}}}),
])
def test_extra_vars_with_file(spec_fixture,
workspace_manager_fixture,
test_workspace, input_args,
file_dicts, expected_output_dict, tmpdir):
"""Tests that extra-vars supports yaml file with "@". """
tmp_dir = tmpdir.mkdir("tmp")
dry_output = tmp_dir.join("dry_output.yml")
for file_dict in file_dicts:
tmp_file = tmp_dir.join(file_dict["filename"])
# write dict to tmp yaml file
with open(str(tmp_file), 'wb') as yaml_file:
yaml_file.write(yaml.safe_dump(file_dict["content"],
default_flow_style=False))
# Inject full file path to command
for i, arg in enumerate(input_args):
input_args[i] = arg.replace(file_dict["filename"],
str(tmp_file))
input_string = ['example'] + input_args + ["-o", str(dry_output),
"--dry-run"]
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
# dry run returns None
assert return_value is None
output_dict = yaml.load(dry_output.read())
# asserts expected_output_dict is subset of output
assert subdict_in_dict(
expected_output_dict,
output_dict), "expected:{} actual:{}".format(expected_output_dict,
output_dict)
@pytest.mark.parametrize("input_value, expected_output_dict", [ # noqa
# DEFAULT style
[
['--dictionary-val', "option1:value1"],
{"dictionary": {"val": {"option1": "value1"}}}
],
[
['--dictionary-val=option1:value1,option2:value2'],
{"dictionary": {"val": {"option1": "value1",
"option2": "value2"}}}
],
[
['--dictionary-val', 'option1:value1,option2:value2'],
{"dictionary": {"val": {"option1": "value1",
"option2": "value2"}}}
],
[
['--dictionary-val', 'my-nice.key:some_value,option2:value2'],
{"dictionary": {"val": {"my-nice.key": "some_value",
"option2": "value2"}}}
],
])
def test_nested_KeyValueList_CLI(spec_fixture,
workspace_manager_fixture,
test_workspace, tmpdir,
input_value, expected_output_dict):
"""Tests that CLI input of Complex type KeyValueList is nested in vars dict.
Use "-o output_file" and evaluate output YAML file.
"""
dry_output = tmpdir.mkdir("tmp").join("dry_output.yml")
input_string = ['example'] + input_value
input_string.extend(["-o", str(dry_output)])
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value == 0
assert path.exists(path.join(inventory_dir, output_file))
output_dict = yaml.load(dry_output.read())["provision"]
# asserts expected_output_dict is subset of output
assert subdict_in_dict(
expected_output_dict,
output_dict), "expected:{} actual:{}".format(expected_output_dict,
output_dict)
@pytest.mark.parametrize("bad_input", [ # noqa
"keyNoVal", "bad-input", # Key, no sign, no value, no sep
"KeyNoValSign2:", # Key, sign2 (':'), no value, no sep
"KeyNoValOtherSign@", # Key, other sign, no val, no spe
":value", # No key, sign2 (':'), value
"key:val,", # End with separator1 (',')
"ke*y:val", "key:v@al", # Invalid sign in key & val - default style
"k1:v1;k2:v2*blabla", # All input should be match - default style
"blabla(k1:v1;k2:v2", # All input should be match - default style
"k1:v1;blabla~k2:v2", # All input should be match - default style
])
def test_nested_KeyValueList_negative(
spec_fixture, workspace_manager_fixture, test_workspace, bad_input):
"""Tests that bad input for KeyValueList raises exception. """
input_string = list(('example', "--dry-run", "--dictionary-val"))
input_string.append(bad_input)
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
from infrared.core.utils import exceptions
with pytest.raises(exceptions.IRKeyValueListException):
spec_manager.run_specs(args=input_string)
@pytest.mark.parametrize("input_value", [ # noqa
["explicit", "--dry-run"],
["", "--dry-run"],
["explicit"],
[""],
])
def test_nested_value_dry_run(spec_fixture,
workspace_manager_fixture,
test_workspace, input_value):
"""Verifies that --dry-run doesn't run playbook. """
dry = "--dry-run" in input_value
if input_value:
input_string = ['example', '--foo-bar'] + input_value
else:
input_string = ['example']
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value is None if dry else return_value == 0
# assert that playbook didn't run if "--dry-run" requested
assert not dry == path.exists(
path.join(inventory_dir, output_file))
@pytest.mark.parametrize("input_value", ["explicit", ""]) # noqa
def test_nested_value_CLI_with_answers_file(spec_fixture, tmpdir,
workspace_manager_fixture,
test_workspace, input_value):
"""Verfies answers file is loaded and that CLI overrides it.
Use "-o output_file" and evaluate output YAML file.
"""
mytempdir = tmpdir.mkdir("tmp")
dry_output = mytempdir.join("dry_output.yml")
config = ConfigParser.ConfigParser()
config.add_section('example')
config.set('example', 'foo-bar', 'from_answers_file')
answers_file = mytempdir.join("answers_file")
with open(str(answers_file), 'wb') as configfile:
config.write(configfile)
input_string = ['example', '--from-file', str(answers_file)]
if input_value:
input_string += ['--foo-bar', input_value]
input_string.extend(["-o", str(dry_output)])
# if no input, check that default value is loaded
expected_output_dict = {"foo": {"bar": input_value or 'from_answers_file'}}
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value == 0
assert path.exists(path.join(inventory_dir, output_file))
output_dict = yaml.load(dry_output.read())["provision"]
# asserts expected_output_dict is subset of output
assert subdict_in_dict(
expected_output_dict,
output_dict), "expected:{} actual:{}".format(expected_output_dict,
output_dict)
def test_generate_answers_file(spec_fixture, workspace_manager_fixture, # noqa
test_workspace, tmpdir):
"""Verify answers-file is generated to destination. """
answers_file = tmpdir.mkdir("tmp").join("answers_file")
input_string = \
['example', '--generate-answers-file', str(answers_file), '--dry-run']
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value is None
config = ConfigParser.ConfigParser()
config.read(str(answers_file))
assert config.get("example", "foo-bar") == "default string"
# verify playbook didn't run
output_file = "output.example"
inventory_dir = test_workspace.path
assert not path.exists(path.join(inventory_dir, output_file))
def test_ansible_args(spec_fixture, workspace_manager_fixture, # noqa
test_workspace):
"""Verify execution runs with --ansible-args. """
input_string = ['example', '--ansible-args',
'start-at-task="Test output";tags=only_this']
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
inventory_dir = test_workspace.path
output_file = "output.example"
assert not path.exists(path.join(inventory_dir, output_file))
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value == 0
# Combination of tags and start-at-task should avoid the file creation
assert not path.exists(path.join(inventory_dir, output_file))
@pytest.mark.parametrize("cli_args, from_file, expected_output", [ # noqa
# Tests CLI (no section)
("--iniopt opt1=val1",
None,
{'opt1': 'val1'}),
# Tests CLI
("--iniopt sec1.opt1=val1",
None,
{'sec1': {'opt1': 'val1'}}),
# Tests CLI (multiple args)
("--iniopt sec1.opt1=val1 --iniopt sec1.opt2=val2",
None,
{'sec1': {'opt1': 'val1', 'opt2': 'val2'}}),
# Tests from-file
(None,
'tests/example/files/answers_file.ini',
{'sec1': {'opt1': 'f_val1', 'opt2': 'f_val2'},
'sec2': {'opt1': 'f_val3'}}),
# Tests CLI with from-file
("--iniopt secx.optx=valx",
'tests/example/files/answers_file.ini',
{'secx': {'optx': 'valx'}}),
])
def test_output_with_IniType(spec_fixture, tmpdir,
workspace_manager_fixture, test_workspace,
cli_args, from_file, expected_output):
"""Verifies the output file with IniType complex type args from CLI & file
"""
my_temp_dir = tmpdir.mkdir("tmp")
dry_output = my_temp_dir.join("dry_output.yml")
input_string = ['example', "--dry-run", "-o", str(dry_output)]
if from_file:
input_string += ['--from-file', from_file]
if cli_args:
input_string += cli_args.split()
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
workspace_manager_fixture.activate(test_workspace.name)
return_value = spec_manager.run_specs(args=input_string)
assert return_value is None
assert path.exists(dry_output.strpath),\
"Output file doesn't exit: {}".format(dry_output.strpath)
with open(dry_output.strpath) as fp:
loaded_yml = yaml.safe_load(fp)
assert loaded_yml['provision']['iniopt'] == expected_output
def test_deprecation(spec_fixture, workspace_manager_fixture, # noqa
test_workspace, tmpdir):
"""Verify execution runs with deprecated option """
my_temp_dir = tmpdir.mkdir("tmp")
deprecated_output = my_temp_dir.join("deprecated_output.yml")
deprecated_input_string = \
['example', '--deprecated-way', 'TestingValue', '--dry-run',
'-o', str(deprecated_output)]
output = my_temp_dir.join("output.yml")
input_string = \
['example', '--new-way', 'TestingValue', '--dry-run',
'-o', str(output)]
spec_manager = api.SpecManager()
spec_manager.register_spec(spec_fixture)
workspace_manager_fixture.activate(test_workspace.name)
spec_manager.run_specs(args=deprecated_input_string)
spec_manager.run_specs(args=input_string)
with open(deprecated_output.strpath) as fp:
deprecated_yml = yaml.safe_load(fp)["provision"]
with open(output.strpath) as fp:
new_yml = yaml.safe_load(fp)["provision"]
assert deprecated_yml.get('new', None).get('way', None) == 'TestingValue'
assert new_yml.get('new', None).get('way', None) == 'TestingValue'
|
the-stack_0_18721 | from features import Features
from classifiers import Classifiers
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
from warnings import simplefilter
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
np.set_printoptions(precision=4, suppress=True)
simplefilter(action='ignore', category=FutureWarning)
# def crosstab_stats(test, pred):
# crosstab = pd.crosstab(test, pred, rownames=['Actual'], colnames=['Predicted'])
# print()
# print("************ Test results ************")
# print("Crosstab: ")
# print(crosstab)
# try:
# tp = crosstab.iloc[0][0]
# except:
# tp = 0
# try:
# fn = crosstab.iloc[0][1]
# except:
# fn = 0
# try:
# fp = crosstab.iloc[1][0]
# except:
# fp = 0
# try:
# tn = crosstab.iloc[1][1]
# except:
# tn = 0
# print(tp, fp)
# print(fn, tn)
# accuracy = (tp + tn) / (tp + fn + fp + tn)
# precision = (tp) / (tp + fn)
# recall = (tp) / (tp + fp)
# f1score = 2 * precision * recall / (precision + recall)
# print()
# print("Accuracy: ", accuracy)
# print("Precision: ", precision)
# print("Recall: ", recall)
# print("f1score: ", f1score)
def crosstab_stats(test, pred):
# print(confusion_matrix(test, pred))
TP, TN, FP, FN = 0, 0, 0, 0
for i in range(len(pred)):
if test[i] == pred[i] == 'Meal':
TP += 1
if pred[i] == 'Meal' and test[i] == 'NoMeal':
FP += 1
if test[i] == pred[i] == 'NoMeal':
TN += 1
if pred[i] == 'NoMeal' and test[i] == 'Meal':
FN += 1
print('TP: {}, TN: {}, FP: {}, FN: {}'.format(TP, TN, FP, FN))
accuracy = (TP + TN) / (TP + FN + FP + TN)
print("Accuracy: ", accuracy)
if TP != 0:
        precision = TP / (TP + FP)  # precision: correct 'Meal' predictions / all 'Meal' predictions
        print("Precision: ", precision)
        recall = TP / (TP + FN)  # recall: correct 'Meal' predictions / all actual 'Meal' samples
print("Recall: ", recall)
f1score = 2 * precision * recall / (precision + recall)
print("f1score: ", f1score)
# Drop a column if more than half of its values are NaN (dropna keeps columns with at least len(df) // 2 non-NaN values)
def DropIfMaxNaN(df):
    # for i in df.columns:
return df.dropna(thresh=len(df) // 2, axis=1)
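# Illustrative sketch: with 4 rows, dropna(thresh=len(df) // 2) keeps only columns
# having at least 2 non-NaN values. The column names are made up for the example.
def _example_drop_if_max_nan():
    toy = pd.DataFrame({'keep': [1.0, np.nan, 3.0, 4.0],
                        'drop': [np.nan, np.nan, np.nan, 4.0]})
    print(list(DropIfMaxNaN(toy).columns))  # ['keep']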
# Fill NaN values in each column by linear interpolation (np.interp); reversal of the series is done by the caller
def handlingNaN(df):
for i in list(df.columns):
A = np.array(df[i])
ok = np.logical_not(np.isnan(A))
xp = ok.ravel().nonzero()[0]
fp = A[np.logical_not(np.isnan(A))]
x = np.isnan(A).ravel().nonzero()[0]
A[np.isnan(A)] = np.interp(x, xp, fp)
df[i] = pd.DataFrame({'a': list(map(lambda x: round(x, 2), A))})['a']
return df
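# Illustrative sketch: handlingNaN fills interior gaps by linear interpolation
# (np.interp). The column name 'cgm' is a made-up example.
def _example_handling_nan():
    toy = pd.DataFrame({'cgm': [100.0, np.nan, np.nan, 130.0]})
    filled = handlingNaN(toy)
    print(filled['cgm'].tolist())  # [100.0, 110.0, 120.0, 130.0]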
# TOP FEATURES (EIGEN VECTORS)
def TopFeatures(df, components):
x = StandardScaler().fit_transform(df.drop(['Class'], axis=1))
pca = decomposition.PCA(n_components=components)
pca2 = pca.fit(x)
return list(
map(
lambda x: x[1],
sorted(
zip(map(lambda x: max(x), pca2.components_), df.columns),
key=lambda x: x[0],
reverse=True
)[:5]
)
)
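# Illustrative sketch: TopFeatures on a tiny made-up feature matrix with a 'Class'
# column. Note that the zip above pairs each component's maximum loading with the
# frame's leading column names, so at most `components` names come back here.
def _example_top_features():
    toy = pd.DataFrame({'f1': [1.0, 2.0, 3.0, 4.0],
                        'f2': [2.0, 1.0, 4.0, 3.0],
                        'f3': [0.1, 0.2, 0.3, 0.4],
                        'Class': ['Meal', 'NoMeal', 'Meal', 'NoMeal']})
    print(TopFeatures(toy, components=2))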
if __name__ == '__main__':
# To get all the files from local directory
numFiles = 5
meal = [pd.read_csv('MealNoMealData/mealData{}.csv'.format(i), header=None) for i in range(1, numFiles + 1)]
noMeal = [pd.read_csv('MealNoMealData/Nomeal{}.csv'.format(i), header=None) for i in range(1, numFiles + 1)]
for i in range(numFiles):
meal[i] = meal[i].T
noMeal[i] = noMeal[i].T
for i in range(numFiles):
meal[i] = DropIfMaxNaN(meal[i])
meal[i] = handlingNaN(meal[i])
for j in meal[i].columns:
meal[i][j] = list(meal[i][j])[::-1]
meal[i].columns = [i for i in range(len(meal[i].columns))]
noMeal[i] = DropIfMaxNaN(noMeal[i])
        noMeal[i] = handlingNaN(noMeal[i])
for j in noMeal[i].columns:
noMeal[i][j] = list(noMeal[i][j])[::-1]
noMeal[i].columns = [i for i in range(len(noMeal[i].columns))]
DeviationFeature = pd.DataFrame(columns=['inRangeCount', 'LowCount', 'HighCount', 'LowMean', 'HighMean', 'Class'])
for i in range(numFiles):
DeviationFeature = DeviationFeature.append(Features.Deviation(meal[i], 'Meal'), ignore_index=True)
for i in range(numFiles):
DeviationFeature = DeviationFeature.append(Features.Deviation(noMeal[i], 'NoMeal'), ignore_index=True)
mean_range_feature = pd.DataFrame(columns=['MeanRange', 'Class'])
for i in range(numFiles):
mean_range_feature = mean_range_feature.append(Features.meanRange(meal[i], 'Meal'), ignore_index=True)
for i in range(numFiles):
mean_range_feature = mean_range_feature.append(Features.meanRange(noMeal[i], 'NoMeal'), ignore_index=True)
range_feature = pd.DataFrame(columns=['HighRange', 'LowRange', 'Class'])
for i in range(numFiles):
range_feature = range_feature.append(Features.Range(meal[i], 'Meal'), ignore_index=True)
for i in range(numFiles):
range_feature = range_feature.append(Features.Range(noMeal[i], 'NoMeal'), ignore_index=True)
fftFeature = pd.DataFrame(columns=['varFFT', 'sdFFT', 'meanFFT', 'Class'])
for i in range(numFiles):
fftFeature = fftFeature.append(Features.FFT(meal[i], 'Meal'), ignore_index=True)
for i in range(numFiles):
fftFeature = fftFeature.append(Features.FFT(noMeal[i], 'NoMeal'), ignore_index=True)
QuantileFeature = pd.DataFrame(columns=['Quantile', 'Class'])
for i in range(numFiles):
QuantileFeature = QuantileFeature.append(Features.Quantile(meal[i], 'Meal'), ignore_index=True)
for i in range(numFiles):
QuantileFeature = QuantileFeature.append(Features.Quantile(noMeal[i], 'NoMeal'), ignore_index=True)
# Feature Join
FeatureMatrix = pd.concat(
[
DeviationFeature,
mean_range_feature[['MeanRange']],
range_feature[['HighRange', 'LowRange']],
fftFeature[['varFFT', 'sdFFT', 'meanFFT']],
QuantileFeature['Quantile'],
],
axis=1
)
if int(input('Pass From PCA? 1: YES, 0: NO:\t')) == 1:
columns = TopFeatures(FeatureMatrix, len(FeatureMatrix.columns) - 1)
else:
columns = list(FeatureMatrix.columns)
columns.remove('Class')
# TRAINING SET
Input = np.array(FeatureMatrix[columns])
Output = np.array(FeatureMatrix['Class'])
inputTrain, inputTest, outputTrain, outputTest = train_test_split(Input, Output, test_size=0.3)
# cv = KFold(n_splits=10, random_state=42, shuffle=False)
# SVM
svc = Classifiers.SVC(inputTrain, outputTrain)
svc_scores = cross_val_score(svc, inputTrain, outputTrain, cv=10)
# linear_model
log = Classifiers.LOG(inputTrain, outputTrain)
log_scores = cross_val_score(log, inputTrain, outputTrain, cv=10)
# neighbors
knn = Classifiers.KNN(inputTrain, outputTrain)
knn_scores = cross_val_score(knn, inputTrain, outputTrain, cv=10)
# RandomForestClassifier
rfc = Classifiers.RFC(inputTrain, outputTrain)
rfc_scores = cross_val_score(rfc, inputTrain, outputTrain, cv=10)
# GaussianNB
gnb = Classifiers.Gaussian(inputTrain, outputTrain)
gnb_scores = cross_val_score(gnb, inputTrain, outputTrain, cv=10)
print("************ Model accuracies ************")
print("Support Vector Machine:\n\t Accuracy Score:{}\n\t Max of 10-Fold: {}\n\t 10-Fold CV:{}".format(round(svc.score(inputTrain, outputTrain), 2), round(max(svc_scores), 2), svc_scores))
print("K Nearest Neighbor:\n\t Accuracy Score:{}\n\t Max of 10-Fold: {}\n\t 10-Fold CV:{}".format(round(knn.score(inputTrain, outputTrain), 2), round(max(knn_scores), 2), knn_scores))
print("Logistic Regression:\n\t Accuracy Score:{}\n\t Max of 10-Fold: {}\n\t 10-Fold CV:{}".format(round(log.score(inputTrain, outputTrain), 2), round(max(log_scores), 2), log_scores))
print("Random Forest:\n\t Accuracy Score:{}\n\t Max of 10-Fold: {}\n\t 10-Fold CV:{}".format(round(rfc.score(inputTrain, outputTrain), 2), round(max(rfc_scores), 2), rfc_scores))
print("GaussianNB:\n\t Accuracy Score:{}\n\t Max of 10-Fold: {}\n\t 10-Fold CV:{}".format(round(gnb.score(inputTrain, outputTrain), 2), round(max(gnb_scores), 2), gnb_scores))
print()
|
the-stack_0_18723 | # -*- coding: utf-8 -*-
'''
Manage Route53 records
.. versionadded:: 2014.7.0
Create and delete Route53 records. Be aware that this interacts with Amazon's
services, and so may incur charges.
This module uses ``boto``, which can be installed via package, or pip.
This module accepts explicit route53 credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
route53.keyid: GKTADJGHEIQSXMKKRBJ08H
route53.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile, either
passed in as a dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
mycnamerecord:
boto_route53.present:
- name: test.example.com.
- value: my-elb.us-east-1.elb.amazonaws.com.
- zone: example.com.
- ttl: 60
- record_type: CNAME
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
# Using a profile from pillars
myarecord:
boto_route53.present:
- name: test.example.com.
- value: 1.1.1.1
- zone: example.com.
- ttl: 60
- record_type: A
- region: us-east-1
- profile: myprofile
# Passing in a profile
myarecord:
boto_route53.present:
- name: test.example.com.
- value: 1.1.1.1
- zone: example.com.
- ttl: 60
- record_type: A
- region: us-east-1
- profile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import uuid
# Import Salt Libs
import salt.utils.data
import salt.utils.json
from salt.ext import six
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_route53' if 'boto_route53.get_record' in __salt__ else False
def rr_present(*args, **kwargs):
return present(*args, **kwargs)
def present(name, value, zone, record_type, ttl=None, identifier=None, region=None, key=None,
keyid=None, profile=None, wait_for_sync=True, split_dns=False, private_zone=False):
'''
Ensure the Route53 record is present.
name
Name of the record.
value
Value of the record. As a special case, you can pass in:
`private:<Name tag>` to have the function autodetermine the private IP
`public:<Name tag>` to have the function autodetermine the public IP
zone
The zone to create the record in.
record_type
The record type (A, NS, MX, TXT, etc.)
ttl
The time to live for the record.
identifier
The unique identifier to use for this record.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that contains a dict
with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53 before returning success.
split_dns
Route53 supports parallel public and private DNS zones with the same name.
private_zone
If using split_dns, specify if this is the private zone.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# If a list is passed in for value, change it to a comma-separated string
# So it will work with subsequent boto module calls and string functions
if isinstance(value, list):
value = ','.join(value)
elif value.startswith('private:') or value.startswith('public:'):
name_tag = value.split(':', 1)[1]
in_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
r = __salt__['boto_ec2.find_instances'](name=name_tag,
return_objs=True,
in_states=in_states,
profile=profile)
if len(r) < 1:
ret['comment'] = 'Error: instance with Name tag {0} not found'.format(name_tag)
ret['result'] = False
return ret
if len(r) > 1:
ret['comment'] = 'Error: Name tag {0} matched more than one instance'.format(name_tag)
ret['result'] = False
return ret
instance = r[0]
private_ip = getattr(instance, 'private_ip_address', None)
public_ip = getattr(instance, 'ip_address', None)
if value.startswith('private:'):
value = private_ip
log.info('Found private IP %s for instance %s', private_ip, name_tag)
else:
if public_ip is None:
ret['comment'] = 'Error: No Public IP assigned to instance with Name {0}'.format(name_tag)
ret['result'] = False
return ret
value = public_ip
log.info('Found public IP %s for instance %s', public_ip, name_tag)
try:
record = __salt__['boto_route53.get_record'](name, zone, record_type,
False, region, key, keyid,
profile, split_dns,
private_zone, identifier)
except SaltInvocationError as err:
ret['comment'] = 'Error: {0}'.format(err)
ret['result'] = False
return ret
if isinstance(record, dict) and not record:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be added.'.format(name)
ret['result'] = None
return ret
added = __salt__['boto_route53.add_record'](name, value, zone,
record_type, identifier,
ttl, region, key, keyid,
profile, wait_for_sync,
split_dns, private_zone)
if added:
ret['changes']['old'] = None
ret['changes']['new'] = {'name': name,
'value': value,
'record_type': record_type,
'ttl': ttl,
'identifier': identifier}
ret['comment'] = 'Added {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to add {0} Route53 record.'.format(name)
return ret
elif record:
need_to_update = False
# Values can be a comma separated list and some values will end with a
# period (even if we set it without one). To easily check this we need
# to split and check with the period stripped from the input and what's
# in route53.
# TODO: figure out if this will cause us problems with some records.
_values = [x.rstrip('.') for x in value.split(',')]
_r_values = [x.rstrip('.') for x in record['value'].split(',')]
_values.sort()
_r_values.sort()
if _values != _r_values:
need_to_update = True
if identifier and identifier != record['identifier']:
need_to_update = True
if ttl and six.text_type(ttl) != six.text_type(record['ttl']):
need_to_update = True
if need_to_update:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be updated.'.format(name)
ret['result'] = None
return ret
updated = __salt__['boto_route53.update_record'](name, value, zone,
record_type,
identifier, ttl,
region, key,
keyid, profile,
wait_for_sync,
split_dns,
private_zone)
if updated:
ret['changes']['old'] = record
ret['changes']['new'] = {'name': name,
'value': value,
'record_type': record_type,
'ttl': ttl,
'identifier': identifier}
ret['comment'] = 'Updated {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to update {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} exists.'.format(name)
return ret
def rr_absent(*args, **kwargs):
return absent(*args, **kwargs)
def absent(
name,
zone,
record_type,
identifier=None,
region=None,
key=None,
keyid=None,
profile=None,
wait_for_sync=True,
split_dns=False,
private_zone=False):
'''
Ensure the Route53 record is deleted.
name
Name of the record.
zone
The zone to delete the record from.
record_type
The record type (A, NS, MX, TXT, etc.)
identifier
An identifier to match for deletion.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53.
split_dns
Route53 supports a public and private DNS zone with the same
names.
private_zone
If using split_dns, specify if this is the private zone.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
record = __salt__['boto_route53.get_record'](name, zone, record_type,
False, region, key, keyid,
profile, split_dns,
private_zone, identifier)
if record:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be deleted.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_route53.delete_record'](name, zone,
record_type,
identifier, False,
region, key, keyid,
profile,
wait_for_sync,
split_dns,
private_zone)
if deleted:
ret['changes']['old'] = record
ret['changes']['new'] = None
ret['comment'] = 'Deleted {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to delete {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} does not exist.'.format(name)
return ret
def hosted_zone_present(name, domain_name=None, private_zone=False, caller_ref=None, comment='',
vpc_id=None, vpc_name=None, vpc_region=None, region=None, key=None,
keyid=None, profile=None):
'''
Ensure a hosted zone exists with the given attributes. Note that most
things cannot be modified once a zone is created - it must be deleted and
re-spun to update these attributes:
- private_zone (AWS API limitation).
- comment (the appropriate call exists in the AWS API and in boto3, but has
not, as of this writing, been added to boto2).
- vpc_id (same story - we really need to rewrite this module with boto3)
- vpc_name (really just a pointer to vpc_id anyway).
- vpc_region (again, supported in boto3 but not boto2).
If you need the ability to update these attributes, please use the newer
boto3_route53 module instead.
name
The name of the state definition.
domain_name
The name of the domain. This must be fully-qualified, terminating with a period. This is
the name you have registered with your domain registrar. It is also the name you will
delegate from your registrar to the Amazon Route 53 delegation servers returned in response
to this request. Defaults to the value of name if not provided.
private_zone
Set True if creating a private hosted zone.
caller_ref
A unique string that identifies the request and that allows create_hosted_zone() calls to be
retried without the risk of executing the operation twice. This helps ensure idempotency
across state calls, but can cause issues if a zone is deleted and then an attempt is made
to recreate it with the same caller_ref. If not provided, a unique UUID will be generated
at each state run, which avoids the risk of the above (transient) error. This option is
generally not needed. Maximum length of 128.
comment
Any comments you want to include about the hosted zone.
vpc_id
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required. Exclusive with vpc_name. Ignored when creating a non-private zone.
vpc_name
When creating a private hosted zone, either the VPC ID or VPC Name to associate with is
        required. Exclusive with vpc_id. Ignored when creating a non-private zone.
vpc_region
When creating a private hosted zone, the region of the associated VPC is required. If not
provided, an effort will be made to determine it from vpc_id or vpc_name, where possible.
If this fails, you'll need to provide an explicit value for this option. Ignored when
creating a non-private zone.
'''
domain_name = domain_name if domain_name else name
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # First translate vpc_name into a vpc_id if possible
if private_zone:
if not salt.utils.data.exactly_one((vpc_name, vpc_id)):
raise SaltInvocationError('Either vpc_name or vpc_id is required when creating a '
'private zone.')
vpcs = __salt__['boto_vpc.describe_vpcs'](
vpc_id=vpc_id, name=vpc_name, region=region, key=key,
keyid=keyid, profile=profile).get('vpcs', [])
if vpc_region and vpcs:
vpcs = [v for v in vpcs if v['region'] == vpc_region]
if not vpcs:
msg = 'Private zone requested but a VPC matching given criteria not found.'
log.error(msg)
ret['comment'] = msg
ret['result'] = False
return ret
if len(vpcs) > 1:
log.error(
'Private zone requested but multiple VPCs matching given '
'criteria found: %s', [v['id'] for v in vpcs]
)
return None
vpc = vpcs[0]
if vpc_name:
vpc_id = vpc['id']
if not vpc_region:
vpc_region = vpc['region']
# Next, see if it (or they) exist at all, anywhere?
deets = __salt__['boto_route53.describe_hosted_zones'](
domain_name=domain_name, region=region, key=key, keyid=keyid,
profile=profile)
create = False
if not deets:
create = True
else: # Something exists - now does it match our criteria?
if (salt.utils.json.loads(deets['HostedZone']['Config']['PrivateZone']) !=
private_zone):
create = True
else:
if private_zone:
for d in deets.get('VPCs', {}):
if (d['VPCId'] == vpc_id
and d['VPCRegion'] == vpc_region):
create = False
break
else:
create = True
if not create:
        ret['comment'] = 'Hosted Zone {0} already in desired state'.format(
domain_name)
else:
# Until we get modifies in place with boto3, the best option is to
# attempt creation and let route53 tell us if we're stepping on
# toes. We can't just fail, because some scenarios (think split
# horizon DNS) require zones with identical names but different
# settings...
log.info('A Hosted Zone with name %s already exists, but with '
'different settings. Will attempt to create the one '
'requested on the assumption this is what is desired. '
'This may fail...', domain_name)
if create:
if caller_ref is None:
caller_ref = six.text_type(uuid.uuid4())
if __opts__['test']:
ret['comment'] = 'Route53 Hosted Zone {0} set to be added.'.format(
domain_name)
ret['result'] = None
return ret
res = __salt__['boto_route53.create_hosted_zone'](domain_name=domain_name,
caller_ref=caller_ref, comment=comment, private_zone=private_zone,
vpc_id=vpc_id, vpc_region=vpc_region, region=region, key=key,
keyid=keyid, profile=profile)
if res:
msg = 'Hosted Zone {0} successfully created'.format(domain_name)
log.info(msg)
ret['comment'] = msg
ret['changes']['old'] = None
ret['changes']['new'] = res
else:
ret['comment'] = 'Creating Hosted Zone {0} failed'.format(
domain_name)
ret['result'] = False
return ret
def hosted_zone_absent(name, domain_name=None, region=None, key=None,
keyid=None, profile=None):
'''
    Ensure the Route53 Hosted Zone described is absent
name
The name of the state definition.
domain_name
The FQDN (including final period) of the zone you wish absent. If not
provided, the value of name will be used.
'''
domain_name = domain_name if domain_name else name
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
deets = __salt__['boto_route53.describe_hosted_zones'](
domain_name=domain_name, region=region, key=key, keyid=keyid,
profile=profile)
if not deets:
ret['comment'] = 'Hosted Zone {0} already absent'.format(domain_name)
log.info(ret['comment'])
return ret
if __opts__['test']:
ret['comment'] = 'Route53 Hosted Zone {0} set to be deleted.'.format(
domain_name)
ret['result'] = None
return ret
# Not entirely comfortable with this - no safety checks around pub/priv, VPCs
# or anything else. But this is all the module function exposes, so hmph.
# Inclined to put it on the "wait 'til we port to boto3" pile in any case :)
if __salt__['boto_route53.delete_zone'](
zone=domain_name, region=region, key=key, keyid=keyid,
profile=profile):
ret['comment'] = 'Route53 Hosted Zone {0} deleted'.format(domain_name)
log.info(ret['comment'])
ret['changes']['old'] = deets
ret['changes']['new'] = None
return ret
|
the-stack_0_18724 | """
Train Soft-Intro VAE for image datasets
Author: Tal Daniel
"""
# imports
# torch and friends
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision.utils as vutils
import torch.nn.functional as F
from torchvision.utils import make_grid
from torchvision.datasets import CIFAR10, MNIST, FashionMNIST, SVHN
from torchvision import transforms
# standard
import os
import random
import time
import numpy as np
from tqdm import tqdm
import pickle
from dataset import ImageDatasetFromFile, DigitalMonstersDataset
from metrics.fid_score import calculate_fid_given_dataset
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
"""
Models
"""
class ResidualBlock(nn.Module):
"""
https://github.com/hhb072/IntroVAE
Difference: self.bn2 on output and not on (output + identity)
"""
def __init__(self, inc=64, outc=64, groups=1, scale=1.0):
super(ResidualBlock, self).__init__()
midc = int(outc * scale)
        if inc != outc:
self.conv_expand = nn.Conv2d(in_channels=inc, out_channels=outc, kernel_size=1, stride=1, padding=0,
groups=1, bias=False)
else:
self.conv_expand = None
self.conv1 = nn.Conv2d(in_channels=inc, out_channels=midc, kernel_size=3, stride=1, padding=1, groups=groups,
bias=False)
self.bn1 = nn.BatchNorm2d(midc)
self.relu1 = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(in_channels=midc, out_channels=outc, kernel_size=3, stride=1, padding=1, groups=groups,
bias=False)
self.bn2 = nn.BatchNorm2d(outc)
self.relu2 = nn.LeakyReLU(0.2, inplace=True)
def forward(self, x):
if self.conv_expand is not None:
identity_data = self.conv_expand(x)
else:
identity_data = x
output = self.relu1(self.bn1(self.conv1(x)))
output = self.conv2(output)
output = self.bn2(output)
output = self.relu2(torch.add(output, identity_data))
return output
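# Illustrative sketch (made-up shapes): a ResidualBlock keeps the spatial resolution
# and only changes the channel count, here 64 -> 128 on a 16x16 feature map.
def _example_residual_block():
    block = ResidualBlock(inc=64, outc=128)
    feats = torch.randn(2, 64, 16, 16)
    print(block(feats).shape)  # torch.Size([2, 128, 16, 16])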
class Encoder(nn.Module):
def __init__(self, cdim=3, zdim=512, channels=(64, 128, 256, 512, 512, 512), image_size=256, conditional=False,
cond_dim=10):
super(Encoder, self).__init__()
self.zdim = zdim
self.cdim = cdim
self.image_size = image_size
self.conditional = conditional
self.cond_dim = cond_dim
cc = channels[0]
self.main = nn.Sequential(
nn.Conv2d(cdim, cc, 5, 1, 2, bias=False),
nn.BatchNorm2d(cc),
nn.LeakyReLU(0.2),
nn.AvgPool2d(2),
)
sz = image_size // 2
for ch in channels[1:]:
self.main.add_module('res_in_{}'.format(sz), ResidualBlock(cc, ch, scale=1.0))
self.main.add_module('down_to_{}'.format(sz // 2), nn.AvgPool2d(2))
cc, sz = ch, sz // 2
self.main.add_module('res_in_{}'.format(sz), ResidualBlock(cc, cc, scale=1.0))
self.conv_output_size = self.calc_conv_output_size()
num_fc_features = torch.zeros(self.conv_output_size).view(-1).shape[0]
print("conv shape: ", self.conv_output_size)
print("num fc features: ", num_fc_features)
if self.conditional:
self.fc = nn.Linear(num_fc_features + self.cond_dim, 2 * zdim)
else:
self.fc = nn.Linear(num_fc_features, 2 * zdim)
def calc_conv_output_size(self):
dummy_input = torch.zeros(1, self.cdim, self.image_size, self.image_size)
dummy_input = self.main(dummy_input)
return dummy_input[0].shape
def forward(self, x, o_cond=None):
y = self.main(x).view(x.size(0), -1)
if self.conditional and o_cond is not None:
y = torch.cat([y, o_cond], dim=1)
y = self.fc(y)
mu, logvar = y.chunk(2, dim=1)
return mu, logvar
class Decoder(nn.Module):
def __init__(self, cdim=3, zdim=512, channels=(64, 128, 256, 512, 512, 512), image_size=256, conditional=False,
conv_input_size=None, cond_dim=10):
super(Decoder, self).__init__()
self.cdim = cdim
self.image_size = image_size
self.conditional = conditional
cc = channels[-1]
self.conv_input_size = conv_input_size
if conv_input_size is None:
num_fc_features = cc * 4 * 4
else:
num_fc_features = torch.zeros(self.conv_input_size).view(-1).shape[0]
self.cond_dim = cond_dim
if self.conditional:
self.fc = nn.Sequential(
nn.Linear(zdim + self.cond_dim, num_fc_features),
nn.ReLU(True),
)
else:
self.fc = nn.Sequential(
nn.Linear(zdim, num_fc_features),
nn.ReLU(True),
)
sz = 4
self.main = nn.Sequential()
for ch in channels[::-1]:
self.main.add_module('res_in_{}'.format(sz), ResidualBlock(cc, ch, scale=1.0))
self.main.add_module('up_to_{}'.format(sz * 2), nn.Upsample(scale_factor=2, mode='nearest'))
cc, sz = ch, sz * 2
self.main.add_module('res_in_{}'.format(sz), ResidualBlock(cc, cc, scale=1.0))
self.main.add_module('predict', nn.Conv2d(cc, cdim, 5, 1, 2))
def forward(self, z, y_cond=None):
z = z.view(z.size(0), -1)
if self.conditional and y_cond is not None:
y_cond = y_cond.view(y_cond.size(0), -1)
z = torch.cat([z, y_cond], dim=1)
y = self.fc(z)
y = y.view(z.size(0), *self.conv_input_size)
y = self.main(y)
return y
class SoftIntroVAE(nn.Module):
def __init__(self, cdim=3, zdim=512, channels=(64, 128, 256, 512, 512, 512), image_size=256, conditional=False,
cond_dim=10):
super(SoftIntroVAE, self).__init__()
self.zdim = zdim
self.conditional = conditional
self.cond_dim = cond_dim
self.encoder = Encoder(cdim, zdim, channels, image_size, conditional=conditional, cond_dim=cond_dim)
self.decoder = Decoder(cdim, zdim, channels, image_size, conditional=conditional,
conv_input_size=self.encoder.conv_output_size, cond_dim=cond_dim)
def forward(self, x, o_cond=None, deterministic=False):
if self.conditional and o_cond is not None:
mu, logvar = self.encode(x, o_cond=o_cond)
if deterministic:
z = mu
else:
z = reparameterize(mu, logvar)
y = self.decode(z, y_cond=o_cond)
else:
mu, logvar = self.encode(x)
if deterministic:
z = mu
else:
z = reparameterize(mu, logvar)
y = self.decode(z)
return mu, logvar, z, y
def sample(self, z, y_cond=None):
y = self.decode(z, y_cond=y_cond)
return y
def sample_with_noise(self, num_samples=1, device=torch.device("cpu"), y_cond=None):
        z = torch.randn(num_samples, self.zdim).to(device)
return self.decode(z, y_cond=y_cond)
def encode(self, x, o_cond=None):
if self.conditional and o_cond is not None:
mu, logvar = self.encoder(x, o_cond=o_cond)
else:
mu, logvar = self.encoder(x)
return mu, logvar
def decode(self, z, y_cond=None):
if self.conditional and y_cond is not None:
y = self.decoder(z, y_cond=y_cond)
else:
y = self.decoder(z)
return y
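# Illustrative sketch (not called by the training loop): building a small
# SoftIntroVAE with the CIFAR-10 configuration used below and running one forward
# pass on random image-sized tensors; the batch size of 4 is arbitrary.
def _example_soft_intro_vae_forward():
    model = SoftIntroVAE(cdim=3, zdim=128, channels=(64, 128, 256), image_size=32)
    x = torch.randn(4, 3, 32, 32)
    mu, logvar, z, rec = model(x)
    print(mu.shape, logvar.shape, z.shape, rec.shape)  # (4, 128) x3 and (4, 3, 32, 32)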
"""
Helpers
"""
def calc_kl(logvar, mu, mu_o=0.0, logvar_o=0.0, reduce='sum'):
"""
Calculate kl-divergence
:param logvar: log-variance from the encoder
:param mu: mean from the encoder
:param mu_o: negative mean for outliers (hyper-parameter)
:param logvar_o: negative log-variance for outliers (hyper-parameter)
:param reduce: type of reduce: 'sum', 'none'
:return: kld
"""
if not isinstance(mu_o, torch.Tensor):
mu_o = torch.tensor(mu_o).to(mu.device)
if not isinstance(logvar_o, torch.Tensor):
logvar_o = torch.tensor(logvar_o).to(mu.device)
kl = -0.5 * (1 + logvar - logvar_o - logvar.exp() / torch.exp(logvar_o) - (mu - mu_o).pow(2) / torch.exp(
logvar_o)).sum(1)
if reduce == 'sum':
kl = torch.sum(kl)
elif reduce == 'mean':
kl = torch.mean(kl)
return kl
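# Illustrative sanity check (assumption: not part of the original script): with the
# default mu_o=0, logvar_o=0, calc_kl reduces to the closed-form
# KL( N(mu, exp(logvar)) || N(0, I) ) = 0.5 * sum(exp(logvar) + mu^2 - 1 - logvar),
# which can be cross-checked against torch.distributions.
def _example_calc_kl_check():
    import torch.distributions as tdist
    mu, logvar = torch.randn(8, 16), torch.randn(8, 16)
    ours = calc_kl(logvar, mu, reduce="sum")
    p = tdist.Normal(mu, torch.exp(0.5 * logvar))
    q = tdist.Normal(torch.zeros_like(mu), torch.ones_like(mu))
    print(torch.allclose(ours, tdist.kl_divergence(p, q).sum()))  # True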
def reparameterize(mu, logvar):
"""
This function applies the reparameterization trick:
z = mu(X) + sigma(X)^0.5 * epsilon, where epsilon ~ N(0,I)
:param mu: mean of x
    :param logvar: log variance of x
:return z: the sampled latent variable
"""
device = mu.device
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std).to(device)
return mu + eps * std
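# Illustrative sketch: repeated reparameterized draws for a fixed (mu, logvar);
# only epsilon changes between calls, so the empirical std approaches exp(0.5 * logvar).
def _example_reparameterize():
    mu = torch.zeros(1, 4)
    logvar = torch.log(torch.full((1, 4), 0.25))  # std = 0.5
    samples = torch.stack([reparameterize(mu, logvar) for _ in range(1000)])
    print(samples.std(dim=0))  # approximately 0.5 in every coordinate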
def calc_reconstruction_loss(x, recon_x, loss_type='mse', reduction='sum'):
"""
:param x: original inputs
:param recon_x: reconstruction of the VAE's input
:param loss_type: "mse", "l1", "bce"
:param reduction: "sum", "mean", "none"
:return: recon_loss
"""
if reduction not in ['sum', 'mean', 'none']:
raise NotImplementedError
recon_x = recon_x.view(recon_x.size(0), -1)
x = x.view(x.size(0), -1)
if loss_type == 'mse':
recon_error = F.mse_loss(recon_x, x, reduction='none')
recon_error = recon_error.sum(1)
if reduction == 'sum':
recon_error = recon_error.sum()
elif reduction == 'mean':
recon_error = recon_error.mean()
elif loss_type == 'l1':
recon_error = F.l1_loss(recon_x, x, reduction=reduction)
elif loss_type == 'bce':
recon_error = F.binary_cross_entropy(recon_x, x, reduction=reduction)
else:
raise NotImplementedError
return recon_error
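# Illustrative sketch: reduction='none' gives one summed-over-pixels error per
# sample, while 'mean' averages those per-sample sums; the tensors are made up.
def _example_reconstruction_loss():
    x = torch.rand(4, 3, 32, 32)
    recon = x + 0.01 * torch.randn_like(x)
    per_sample = calc_reconstruction_loss(x, recon, loss_type='mse', reduction='none')
    print(per_sample.shape)  # torch.Size([4])
    print(calc_reconstruction_loss(x, recon, loss_type='mse', reduction='mean'))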
def str_to_list(x):
return [int(xi) for xi in x.split(',')]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in [".jpg", ".png", ".jpeg", ".bmp"])
def record_scalar(writer, scalar_list, scalar_name_list, cur_iter):
scalar_name_list = scalar_name_list[1:-1].split(',')
for idx, item in enumerate(scalar_list):
writer.add_scalar(scalar_name_list[idx].strip(' '), item, cur_iter)
def record_image(writer, image_list, cur_iter, num_rows=8):
image_to_show = torch.cat(image_list, dim=0)
writer.add_image('visualization', make_grid(image_to_show, nrow=num_rows), cur_iter)
def load_model(model, pretrained, device):
weights = torch.load(pretrained, map_location=device)
model.load_state_dict(weights['model'], strict=False)
def save_checkpoint(model, epoch, iteration, prefix=""):
model_out_path = "./saves/" + prefix + "model_epoch_{}_iter_{}.pth".format(epoch, iteration)
state = {"epoch": epoch, "model": model.state_dict()}
if not os.path.exists("./saves/"):
os.makedirs("./saves/")
torch.save(state, model_out_path)
print("model checkpoint saved @ {}".format(model_out_path))
"""
Train Functions
"""
def train_soft_intro_vae(dataset='cifar10', z_dim=128, lr_e=2e-4, lr_d=2e-4, batch_size=128, num_workers=4,
start_epoch=0, exit_on_negative_diff=False,
num_epochs=250, num_vae=0, save_interval=50, recon_loss_type="mse",
beta_kl=1.0, beta_rec=1.0, beta_neg=1.0, test_iter=1000, seed=-1, pretrained=None,
device=torch.device("cpu"), num_row=8, gamma_r=1e-8, with_fid=False):
"""
:param dataset: dataset to train on: ['cifar10', 'mnist', 'fmnist', 'svhn', 'monsters128', 'celeb128', 'celeb256', 'celeb1024']
:param z_dim: latent dimensions
:param lr_e: learning rate for encoder
:param lr_d: learning rate for decoder
:param batch_size: batch size
:param num_workers: num workers for the loading the data
:param start_epoch: epoch to start from
:param exit_on_negative_diff: stop run if mean kl diff between fake and real is negative after 50 epochs
:param num_epochs: total number of epochs to run
:param num_vae: number of epochs for vanilla vae training
:param save_interval: epochs between checkpoint saving
:param recon_loss_type: type of reconstruction loss ('mse', 'l1', 'bce')
:param beta_kl: beta coefficient for the kl divergence
:param beta_rec: beta coefficient for the reconstruction loss
:param beta_neg: beta coefficient for the kl divergence in the expELBO function
:param test_iter: iterations between sample image saving
:param seed: seed
:param pretrained: path to pretrained model, to continue training
:param device: device to run calculation on - torch.device('cuda:x') or torch.device('cpu')
    :param num_row: number of images in a row for the sample image saving
:param gamma_r: coefficient for the reconstruction loss for fake data in the decoder
:param with_fid: calculate FID during training (True/False)
:return:
"""
if seed != -1:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
print("random seed: ", seed)
# --------------build models -------------------------
if dataset == 'cifar10':
image_size = 32
channels = [64, 128, 256]
train_set = CIFAR10(root='./cifar10_ds', train=True, download=True, transform=transforms.ToTensor())
ch = 3
elif dataset == 'celeb128':
channels = [64, 128, 256, 512, 512]
image_size = 128
ch = 3
output_height = 128
train_size = 162770
data_root = '../data/celeb256/img_align_celeba'
image_list = [x for x in os.listdir(data_root) if is_image_file(x)]
train_list = image_list[:train_size]
assert len(train_list) > 0
train_set = ImageDatasetFromFile(train_list, data_root, input_height=None, crop_height=None,
output_height=output_height, is_mirror=True)
elif dataset == 'celeb256':
channels = [64, 128, 256, 512, 512, 512]
image_size = 256
ch = 3
output_height = 256
train_size = 162770
data_root = '../data/celeb256/img_align_celeba'
image_list = [x for x in os.listdir(data_root) if is_image_file(x)]
train_list = image_list[:train_size]
assert len(train_list) > 0
train_set = ImageDatasetFromFile(train_list, data_root, input_height=None, crop_height=None,
output_height=output_height, is_mirror=True)
elif dataset == 'celeb1024':
channels = [16, 32, 64, 128, 256, 512, 512, 512]
image_size = 1024
ch = 3
output_height = 1024
train_size = 29000
data_root = './' + dataset
image_list = [x for x in os.listdir(data_root) if is_image_file(x)]
train_list = image_list[:train_size]
assert len(train_list) > 0
train_set = ImageDatasetFromFile(train_list, data_root, input_height=None, crop_height=None,
output_height=output_height, is_mirror=True)
elif dataset == 'monsters128':
channels = [64, 128, 256, 512, 512]
image_size = 128
ch = 3
data_root = './monsters_ds/'
train_set = DigitalMonstersDataset(root_path=data_root, output_height=image_size)
elif dataset == 'svhn':
image_size = 32
channels = [64, 128, 256]
train_set = SVHN(root='./svhn', split='train', transform=transforms.ToTensor(), download=True)
ch = 3
elif dataset == 'fmnist':
image_size = 28
channels = [64, 128]
train_set = FashionMNIST(root='./fmnist_ds', train=True, download=True, transform=transforms.ToTensor())
ch = 1
elif dataset == 'mnist':
image_size = 28
channels = [64, 128]
train_set = MNIST(root='./mnist_ds', train=True, download=True, transform=transforms.ToTensor())
ch = 1
else:
raise NotImplementedError("dataset is not supported")
model = SoftIntroVAE(cdim=ch, zdim=z_dim, channels=channels, image_size=image_size).to(device)
if pretrained is not None:
load_model(model, pretrained, device)
print(model)
fig_dir = './figures_' + dataset
os.makedirs(fig_dir, exist_ok=True)
optimizer_e = optim.Adam(model.encoder.parameters(), lr=lr_e)
optimizer_d = optim.Adam(model.decoder.parameters(), lr=lr_d)
e_scheduler = optim.lr_scheduler.MultiStepLR(optimizer_e, milestones=(350,), gamma=0.1)
d_scheduler = optim.lr_scheduler.MultiStepLR(optimizer_d, milestones=(350,), gamma=0.1)
    scale = 1 / (ch * image_size ** 2)  # normalize by image size (channels * height * width)
train_data_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
start_time = time.time()
cur_iter = 0
kls_real = []
kls_fake = []
kls_rec = []
rec_errs = []
exp_elbos_f = []
exp_elbos_r = []
best_fid = None
for epoch in range(start_epoch, num_epochs):
if with_fid and ((epoch == 0) or (epoch >= 100 and epoch % 20 == 0) or epoch == num_epochs - 1):
with torch.no_grad():
print("calculating fid...")
fid = calculate_fid_given_dataset(train_data_loader, model, batch_size, cuda=True, dims=2048,
device=device, num_images=50000)
print("fid:", fid)
if best_fid is None:
best_fid = fid
elif best_fid > fid:
print("best fid updated: {} -> {}".format(best_fid, fid))
best_fid = fid
# save
save_epoch = epoch
prefix = dataset + "_soft_intro" + "_betas_" + str(beta_kl) + "_" + str(beta_neg) + "_" + str(
beta_rec) + "_" + "fid_" + str(fid) + "_"
save_checkpoint(model, save_epoch, cur_iter, prefix)
diff_kls = []
# save models
if epoch % save_interval == 0 and epoch > 0:
save_epoch = (epoch // save_interval) * save_interval
prefix = dataset + "_soft_intro" + "_betas_" + str(beta_kl) + "_" + str(beta_neg) + "_" + str(
beta_rec) + "_"
save_checkpoint(model, save_epoch, cur_iter, prefix)
model.train()
batch_kls_real = []
batch_kls_fake = []
batch_kls_rec = []
batch_rec_errs = []
batch_exp_elbo_f = []
batch_exp_elbo_r = []
pbar = tqdm(iterable=train_data_loader)
for batch in pbar:
# --------------train------------
if dataset in ["cifar10", "svhn", "fmnist", "mnist"]:
batch = batch[0]
if epoch < num_vae:
if len(batch.size()) == 3:
batch = batch.unsqueeze(0)
batch_size = batch.size(0)
real_batch = batch.to(device)
# =========== Update E, D ================
real_mu, real_logvar, z, rec = model(real_batch)
loss_rec = calc_reconstruction_loss(real_batch, rec, loss_type=recon_loss_type, reduction="mean")
loss_kl = calc_kl(real_logvar, real_mu, reduce="mean")
loss = beta_rec * loss_rec + beta_kl * loss_kl
optimizer_d.zero_grad()
optimizer_e.zero_grad()
loss.backward()
optimizer_e.step()
optimizer_d.step()
pbar.set_description_str('epoch #{}'.format(epoch))
pbar.set_postfix(r_loss=loss_rec.data.cpu().item(), kl=loss_kl.data.cpu().item())
if cur_iter % test_iter == 0:
vutils.save_image(torch.cat([real_batch, rec], dim=0).data.cpu(),
'{}/image_{}.jpg'.format(fig_dir, cur_iter), nrow=num_row)
else:
if len(batch.size()) == 3:
batch = batch.unsqueeze(0)
b_size = batch.size(0)
noise_batch = torch.randn(size=(b_size, z_dim)).to(device)
real_batch = batch.to(device)
# =========== Update E ================
for param in model.encoder.parameters():
param.requires_grad = True
for param in model.decoder.parameters():
param.requires_grad = False
fake = model.sample(noise_batch)
real_mu, real_logvar = model.encode(real_batch)
z = reparameterize(real_mu, real_logvar)
rec = model.decoder(z)
loss_rec = calc_reconstruction_loss(real_batch, rec, loss_type=recon_loss_type, reduction="mean")
lossE_real_kl = calc_kl(real_logvar, real_mu, reduce="mean")
rec_mu, rec_logvar, z_rec, rec_rec = model(rec.detach())
fake_mu, fake_logvar, z_fake, rec_fake = model(fake.detach())
kl_rec = calc_kl(rec_logvar, rec_mu, reduce="none")
kl_fake = calc_kl(fake_logvar, fake_mu, reduce="none")
loss_rec_rec_e = calc_reconstruction_loss(rec, rec_rec, loss_type=recon_loss_type, reduction='none')
while len(loss_rec_rec_e.shape) > 1:
loss_rec_rec_e = loss_rec_rec_e.sum(-1)
loss_rec_fake_e = calc_reconstruction_loss(fake, rec_fake, loss_type=recon_loss_type, reduction='none')
while len(loss_rec_fake_e.shape) > 1:
loss_rec_fake_e = loss_rec_fake_e.sum(-1)
expelbo_rec = (-2 * scale * (beta_rec * loss_rec_rec_e + beta_neg * kl_rec)).exp().mean()
expelbo_fake = (-2 * scale * (beta_rec * loss_rec_fake_e + beta_neg * kl_fake)).exp().mean()
lossE_fake = 0.25 * (expelbo_rec + expelbo_fake)
lossE_real = scale * (beta_rec * loss_rec + beta_kl * lossE_real_kl)
lossE = lossE_real + lossE_fake
optimizer_e.zero_grad()
lossE.backward()
optimizer_e.step()
# ========= Update D ==================
for param in model.encoder.parameters():
param.requires_grad = False
for param in model.decoder.parameters():
param.requires_grad = True
fake = model.sample(noise_batch)
rec = model.decoder(z.detach())
loss_rec = calc_reconstruction_loss(real_batch, rec, loss_type=recon_loss_type, reduction="mean")
rec_mu, rec_logvar = model.encode(rec)
z_rec = reparameterize(rec_mu, rec_logvar)
fake_mu, fake_logvar = model.encode(fake)
z_fake = reparameterize(fake_mu, fake_logvar)
rec_rec = model.decode(z_rec.detach())
rec_fake = model.decode(z_fake.detach())
loss_rec_rec = calc_reconstruction_loss(rec.detach(), rec_rec, loss_type=recon_loss_type,
reduction="mean")
loss_fake_rec = calc_reconstruction_loss(fake.detach(), rec_fake, loss_type=recon_loss_type,
reduction="mean")
lossD_rec_kl = calc_kl(rec_logvar, rec_mu, reduce="mean")
lossD_fake_kl = calc_kl(fake_logvar, fake_mu, reduce="mean")
lossD = scale * (loss_rec * beta_rec + (
lossD_rec_kl + lossD_fake_kl) * 0.5 * beta_kl + gamma_r * 0.5 * beta_rec * (
loss_rec_rec + loss_fake_rec))
optimizer_d.zero_grad()
lossD.backward()
optimizer_d.step()
if torch.isnan(lossD) or torch.isnan(lossE):
raise SystemError
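                # track the gap between the KL of generated and real samples; if its
                # epoch mean stays clearly negative after epoch 50 (and
                # exit_on_negative_diff is set), training aborts at the end of the epoch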
dif_kl = -lossE_real_kl.data.cpu() + lossD_fake_kl.data.cpu()
pbar.set_description_str('epoch #{}'.format(epoch))
pbar.set_postfix(r_loss=loss_rec.data.cpu().item(), kl=lossE_real_kl.data.cpu().item(),
diff_kl=dif_kl.item(), expelbo_f=expelbo_fake.cpu().item())
diff_kls.append(-lossE_real_kl.data.cpu().item() + lossD_fake_kl.data.cpu().item())
batch_kls_real.append(lossE_real_kl.data.cpu().item())
batch_kls_fake.append(lossD_fake_kl.cpu().item())
batch_kls_rec.append(lossD_rec_kl.data.cpu().item())
batch_rec_errs.append(loss_rec.data.cpu().item())
batch_exp_elbo_f.append(expelbo_fake.data.cpu())
batch_exp_elbo_r.append(expelbo_rec.data.cpu())
if cur_iter % test_iter == 0:
_, _, _, rec_det = model(real_batch, deterministic=True)
max_imgs = min(batch.size(0), 16)
vutils.save_image(
torch.cat([real_batch[:max_imgs], rec_det[:max_imgs], fake[:max_imgs]], dim=0).data.cpu(),
'{}/image_{}.jpg'.format(fig_dir, cur_iter), nrow=num_row)
cur_iter += 1
e_scheduler.step()
d_scheduler.step()
pbar.close()
if exit_on_negative_diff and epoch > 50 and np.mean(diff_kls) < -1.0:
print(
f'the kl difference [{np.mean(diff_kls):.3f}] between fake and real is negative (no sampling improvement)')
print("try to lower beta_neg hyperparameter")
print("exiting...")
raise SystemError("Negative KL Difference")
if epoch > num_vae - 1:
kls_real.append(np.mean(batch_kls_real))
kls_fake.append(np.mean(batch_kls_fake))
kls_rec.append(np.mean(batch_kls_rec))
rec_errs.append(np.mean(batch_rec_errs))
exp_elbos_f.append(np.mean(batch_exp_elbo_f))
exp_elbos_r.append(np.mean(batch_exp_elbo_r))
# epoch summary
print('#' * 50)
print(f'Epoch {epoch} Summary:')
print(f'beta_rec: {beta_rec}, beta_kl: {beta_kl}, beta_neg: {beta_neg}')
print(
f'rec: {rec_errs[-1]:.3f}, kl: {kls_real[-1]:.3f}, kl_fake: {kls_fake[-1]:.3f}, kl_rec: {kls_rec[-1]:.3f}')
print(
f'diff_kl: {np.mean(diff_kls):.3f}, exp_elbo_f: {exp_elbos_f[-1]:.4e}, exp_elbo_r: {exp_elbos_r[-1]:.4e}')
print(f'time: {time.time() - start_time}')
print('#' * 50)
if epoch == num_epochs - 1:
with torch.no_grad():
_, _, _, rec_det = model(real_batch, deterministic=True)
noise_batch = torch.randn(size=(b_size, z_dim)).to(device)
fake = model.sample(noise_batch)
max_imgs = min(batch.size(0), 16)
vutils.save_image(
torch.cat([real_batch[:max_imgs], rec_det[:max_imgs], fake[:max_imgs]], dim=0).data.cpu(),
'{}/image_{}.jpg'.format(fig_dir, cur_iter), nrow=num_row)
# plot graphs
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(np.arange(len(kls_real)), kls_real, label="kl_real")
ax.plot(np.arange(len(kls_fake)), kls_fake, label="kl_fake")
ax.plot(np.arange(len(kls_rec)), kls_rec, label="kl_rec")
ax.plot(np.arange(len(rec_errs)), rec_errs, label="rec_err")
ax.legend()
plt.savefig('./soft_intro_train_graphs.jpg')
with open('./soft_intro_train_graphs_data.pickle', 'wb') as fp:
graph_dict = {"kl_real": kls_real, "kl_fake": kls_fake, "kl_rec": kls_rec, "rec_err": rec_errs}
pickle.dump(graph_dict, fp)
# save models
prefix = dataset + "_soft_intro" + "_betas_" + str(beta_kl) + "_" + str(beta_neg) + "_" + str(
beta_rec) + "_"
save_checkpoint(model, epoch, cur_iter, prefix)
model.train()
if __name__ == '__main__':
"""
Recommended hyper-parameters:
- CIFAR10: beta_kl: 1.0, beta_rec: 1.0, beta_neg: 256, z_dim: 128, batch_size: 32
- SVHN: beta_kl: 1.0, beta_rec: 1.0, beta_neg: 256, z_dim: 128, batch_size: 32
- MNIST: beta_kl: 1.0, beta_rec: 1.0, beta_neg: 256, z_dim: 32, batch_size: 128
- FashionMNIST: beta_kl: 1.0, beta_rec: 1.0, beta_neg: 256, z_dim: 32, batch_size: 128
- Monsters: beta_kl: 0.2, beta_rec: 0.2, beta_neg: 256, z_dim: 128, batch_size: 16
- CelebA-HQ: beta_kl: 1.0, beta_rec: 0.5, beta_neg: 1024, z_dim: 256, batch_size: 8
"""
beta_kl = 1.0
beta_rec = 1.0
beta_neg = 256
if torch.cuda.is_available():
torch.cuda.current_device()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
print("betas: ", beta_kl, beta_neg, beta_rec)
try:
train_soft_intro_vae(dataset="monsters128", z_dim=128, batch_size=16, num_workers=0, num_epochs=400,
num_vae=0, beta_kl=beta_kl, beta_neg=beta_neg, beta_rec=beta_rec,
device=device, save_interval=50, start_epoch=0, lr_e=2e-4, lr_d=2e-4,
pretrained=None,
test_iter=1000, with_fid=False)
except SystemError:
print("Error, probably loss is NaN, try again...")
|
the-stack_0_18725 | #!/usr/bin/env python
import random
import socket
import unittest
from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint
from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpMRoute, \
VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \
VppMplsTable, VppIpTable
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q, ARP
from scapy.layers.inet import IP, UDP, TCP, ICMP, icmptypes, icmpcodes
from util import ppp
from scapy.contrib.mpls import MPLS
class TestIPv4(VppTestCase):
""" IPv4 Test Case """
def setUp(self):
"""
Perform test setup before test case.
**Config:**
- create 3 pg interfaces
- untagged pg0 interface
- Dot1Q subinterface on pg1
- Dot1AD subinterface on pg2
- setup interfaces:
- put it into UP state
- set IPv4 addresses
- resolve neighbor address using ARP
- configure 200 fib entries
:ivar list interfaces: pg interfaces and subinterfaces.
:ivar dict flows: IPv4 packet flows in test.
:ivar list pg_if_packet_sizes: packet sizes in test.
"""
super(TestIPv4, self).setUp()
# create 3 pg interfaces
self.create_pg_interfaces(range(3))
# create 2 subinterfaces for pg1 and pg2
self.sub_interfaces = [
VppDot1QSubint(self, self.pg1, 100),
VppDot1ADSubint(self, self.pg2, 200, 300, 400)]
# packet flows mapping pg0 -> pg1.sub, pg2.sub, etc.
self.flows = dict()
self.flows[self.pg0] = [self.pg1.sub_if, self.pg2.sub_if]
self.flows[self.pg1.sub_if] = [self.pg0, self.pg2.sub_if]
self.flows[self.pg2.sub_if] = [self.pg0, self.pg1.sub_if]
# packet sizes
self.pg_if_packet_sizes = [64, 512, 1518, 9018]
self.sub_if_packet_sizes = [64, 512, 1518 + 4, 9018 + 4]
self.interfaces = list(self.pg_interfaces)
self.interfaces.extend(self.sub_interfaces)
# setup all interfaces
for i in self.interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
# config 2M FIB entries
self.config_fib_entries(200)
def tearDown(self):
"""Run standard test teardown and log ``show ip arp``."""
super(TestIPv4, self).tearDown()
if not self.vpp_dead:
self.logger.info(self.vapi.cli("show ip arp"))
# info(self.vapi.cli("show ip fib")) # many entries
def config_fib_entries(self, count):
"""For each interface add to the FIB table *count* routes to
"10.0.0.1/32" destination with interface's local address as next-hop
address.
:param int count: Number of FIB entries.
- *TODO:* check if the next-hop address shouldn't be remote address
instead of local address.
"""
n_int = len(self.interfaces)
percent = 0
counter = 0.0
dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.1")
dest_addr_len = 32
for i in self.interfaces:
next_hop_address = i.local_ip4n
for j in range(count / n_int):
self.vapi.ip_add_del_route(
dest_addr, dest_addr_len, next_hop_address)
counter += 1
if counter / count * 100 > percent:
self.logger.info("Configure %d FIB entries .. %d%% done" %
(count, percent))
percent += 1
def create_stream(self, src_if, packet_sizes):
"""Create input packet stream for defined interface.
:param VppInterface src_if: Interface to create packet stream for.
:param list packet_sizes: Required packet sizes.
"""
pkts = []
for i in range(0, 257):
dst_if = self.flows[src_if][i % 2]
info = self.create_packet_info(src_if, dst_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IP(src=src_if.remote_ip4, dst=dst_if.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
if isinstance(src_if, VppSubInterface):
p = src_if.add_dot1_layer(p)
size = packet_sizes[(i // 2) % len(packet_sizes)]
self.extend_packet(p, size)
pkts.append(p)
return pkts
def verify_capture(self, dst_if, capture):
"""Verify captured input packet stream for defined interface.
:param VppInterface dst_if: Interface to verify captured packet stream
for.
:param list capture: Captured packet stream.
"""
self.logger.info("Verifying capture on interface %s" % dst_if.name)
last_info = dict()
for i in self.interfaces:
last_info[i.sw_if_index] = None
is_sub_if = False
dst_sw_if_index = dst_if.sw_if_index
if hasattr(dst_if, 'parent'):
is_sub_if = True
for packet in capture:
if is_sub_if:
# Check VLAN tags and Ethernet header
packet = dst_if.remove_dot1_layer(packet)
self.assertTrue(Dot1Q not in packet)
try:
ip = packet[IP]
udp = packet[UDP]
payload_info = self.payload_to_info(str(packet[Raw]))
packet_index = payload_info.index
self.assertEqual(payload_info.dst, dst_sw_if_index)
self.logger.debug(
"Got packet on port %s: src=%u (id=%u)" %
(dst_if.name, payload_info.src, packet_index))
next_info = self.get_next_packet_info_for_interface2(
payload_info.src, dst_sw_if_index,
last_info[payload_info.src])
last_info[payload_info.src] = next_info
self.assertTrue(next_info is not None)
self.assertEqual(packet_index, next_info.index)
saved_packet = next_info.data
# Check standard fields
self.assertEqual(ip.src, saved_packet[IP].src)
self.assertEqual(ip.dst, saved_packet[IP].dst)
self.assertEqual(udp.sport, saved_packet[UDP].sport)
self.assertEqual(udp.dport, saved_packet[UDP].dport)
except:
self.logger.error(ppp("Unexpected or invalid packet:", packet))
raise
for i in self.interfaces:
remaining_packet = self.get_next_packet_info_for_interface2(
i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index])
self.assertTrue(remaining_packet is None,
"Interface %s: Packet expected from interface %s "
"didn't arrive" % (dst_if.name, i.name))
def test_fib(self):
""" IPv4 FIB test
Test scenario:
- Create IPv4 stream for pg0 interface
- Create IPv4 tagged streams for pg1's and pg2's subinterface.
- Send and verify received packets on each interface.
"""
pkts = self.create_stream(self.pg0, self.pg_if_packet_sizes)
self.pg0.add_stream(pkts)
for i in self.sub_interfaces:
pkts = self.create_stream(i, self.sub_if_packet_sizes)
i.parent.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg0.get_capture()
self.verify_capture(self.pg0, pkts)
for i in self.sub_interfaces:
pkts = i.parent.get_capture()
self.verify_capture(i, pkts)
class TestIPv4FibCrud(VppTestCase):
""" FIB - add/update/delete - ip4 routes
Test scenario:
- add 1k,
- del 100,
- add new 1k,
- del 1.5k
..note:: Python API is too slow to add many routes, needs replacement.
"""
def config_fib_many_to_one(self, start_dest_addr, next_hop_addr, count):
"""
        :param start_dest_addr: first destination address to add (dotted-quad string)
        :param next_hop_addr: next-hop address shared by all of the added routes
        :param count: number of consecutive /32 routes to add
        :return list: added ips with 32 prefix
"""
added_ips = []
dest_addr = int(socket.inet_pton(socket.AF_INET,
start_dest_addr).encode('hex'),
16)
dest_addr_len = 32
n_next_hop_addr = socket.inet_pton(socket.AF_INET, next_hop_addr)
for _ in range(count):
n_dest_addr = '{:08x}'.format(dest_addr).decode('hex')
self.vapi.ip_add_del_route(n_dest_addr, dest_addr_len,
n_next_hop_addr)
added_ips.append(socket.inet_ntoa(n_dest_addr))
dest_addr += 1
return added_ips
def unconfig_fib_many_to_one(self, start_dest_addr, next_hop_addr, count):
removed_ips = []
dest_addr = int(socket.inet_pton(socket.AF_INET,
start_dest_addr).encode('hex'),
16)
dest_addr_len = 32
n_next_hop_addr = socket.inet_pton(socket.AF_INET, next_hop_addr)
for _ in range(count):
n_dest_addr = '{:08x}'.format(dest_addr).decode('hex')
self.vapi.ip_add_del_route(n_dest_addr, dest_addr_len,
n_next_hop_addr, is_add=0)
removed_ips.append(socket.inet_ntoa(n_dest_addr))
dest_addr += 1
return removed_ips
def create_stream(self, src_if, dst_if, dst_ips, count):
pkts = []
for _ in range(count):
dst_addr = random.choice(dst_ips)
info = self.create_packet_info(src_if, dst_if)
payload = self.info_to_payload(info)
p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) /
IP(src=src_if.remote_ip4, dst=dst_addr) /
UDP(sport=1234, dport=1234) /
Raw(payload))
info.data = p.copy()
self.extend_packet(p, random.choice(self.pg_if_packet_sizes))
pkts.append(p)
return pkts
def _find_ip_match(self, find_in, pkt):
for p in find_in:
if self.payload_to_info(str(p[Raw])) == \
self.payload_to_info(str(pkt[Raw])):
if p[IP].src != pkt[IP].src:
break
if p[IP].dst != pkt[IP].dst:
break
if p[UDP].sport != pkt[UDP].sport:
break
if p[UDP].dport != pkt[UDP].dport:
break
return p
return None
@staticmethod
def _match_route_detail(route_detail, ip, address_length=32, table_id=0):
if route_detail.address == socket.inet_pton(socket.AF_INET, ip):
if route_detail.table_id != table_id:
return False
elif route_detail.address_length != address_length:
return False
else:
return True
else:
return False
def verify_capture(self, dst_interface, received_pkts, expected_pkts):
self.assertEqual(len(received_pkts), len(expected_pkts))
to_verify = list(expected_pkts)
for p in received_pkts:
self.assertEqual(p.src, dst_interface.local_mac)
self.assertEqual(p.dst, dst_interface.remote_mac)
x = self._find_ip_match(to_verify, p)
to_verify.remove(x)
self.assertListEqual(to_verify, [])
def verify_route_dump(self, fib_dump, ips):
def _ip_in_route_dump(ip, fib_dump):
return next((route for route in fib_dump
if self._match_route_detail(route, ip)),
False)
for ip in ips:
self.assertTrue(_ip_in_route_dump(ip, fib_dump),
'IP {} is not in fib dump.'.format(ip))
def verify_not_in_route_dump(self, fib_dump, ips):
def _ip_in_route_dump(ip, fib_dump):
return next((route for route in fib_dump
if self._match_route_detail(route, ip)),
False)
for ip in ips:
self.assertFalse(_ip_in_route_dump(ip, fib_dump),
'IP {} is in fib dump.'.format(ip))
@classmethod
def setUpClass(cls):
"""
#. Create and initialize 3 pg interfaces.
#. initialize class attributes configured_routes and deleted_routes
to store information between tests.
"""
super(TestIPv4FibCrud, cls).setUpClass()
try:
# create 3 pg interfaces
cls.create_pg_interfaces(range(3))
cls.interfaces = list(cls.pg_interfaces)
# setup all interfaces
for i in cls.interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
cls.configured_routes = []
cls.deleted_routes = []
cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
except Exception:
super(TestIPv4FibCrud, cls).tearDownClass()
raise
def setUp(self):
super(TestIPv4FibCrud, self).setUp()
self.reset_packet_infos()
def test_1_add_routes(self):
""" Add 1k routes
- add 100 routes check with traffic script.
"""
# config 1M FIB entries
self.configured_routes.extend(self.config_fib_many_to_one(
"10.0.0.0", self.pg0.remote_ip4, 100))
fib_dump = self.vapi.ip_fib_dump()
self.verify_route_dump(fib_dump, self.configured_routes)
self.stream_1 = self.create_stream(
self.pg1, self.pg0, self.configured_routes, 100)
self.stream_2 = self.create_stream(
self.pg2, self.pg0, self.configured_routes, 100)
self.pg1.add_stream(self.stream_1)
self.pg2.add_stream(self.stream_2)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg0.get_capture(len(self.stream_1) + len(self.stream_2))
self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
def test_2_del_routes(self):
""" Delete 100 routes
- delete 10 routes check with traffic script.
"""
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
"10.0.0.10", self.pg0.remote_ip4, 10))
for x in self.deleted_routes:
self.configured_routes.remove(x)
fib_dump = self.vapi.ip_fib_dump()
self.verify_route_dump(fib_dump, self.configured_routes)
self.stream_1 = self.create_stream(
self.pg1, self.pg0, self.configured_routes, 100)
self.stream_2 = self.create_stream(
self.pg2, self.pg0, self.configured_routes, 100)
self.stream_3 = self.create_stream(
self.pg1, self.pg0, self.deleted_routes, 100)
self.stream_4 = self.create_stream(
self.pg2, self.pg0, self.deleted_routes, 100)
self.pg1.add_stream(self.stream_1 + self.stream_3)
self.pg2.add_stream(self.stream_2 + self.stream_4)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg0.get_capture(len(self.stream_1) + len(self.stream_2))
self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
def test_3_add_new_routes(self):
""" Add 1k routes
- re-add 5 routes check with traffic script.
- add 100 routes check with traffic script.
"""
tmp = self.config_fib_many_to_one(
"10.0.0.10", self.pg0.remote_ip4, 5)
self.configured_routes.extend(tmp)
for x in tmp:
self.deleted_routes.remove(x)
self.configured_routes.extend(self.config_fib_many_to_one(
"10.0.1.0", self.pg0.remote_ip4, 100))
fib_dump = self.vapi.ip_fib_dump()
self.verify_route_dump(fib_dump, self.configured_routes)
self.stream_1 = self.create_stream(
self.pg1, self.pg0, self.configured_routes, 300)
self.stream_2 = self.create_stream(
self.pg2, self.pg0, self.configured_routes, 300)
self.stream_3 = self.create_stream(
self.pg1, self.pg0, self.deleted_routes, 100)
self.stream_4 = self.create_stream(
self.pg2, self.pg0, self.deleted_routes, 100)
self.pg1.add_stream(self.stream_1 + self.stream_3)
self.pg2.add_stream(self.stream_2 + self.stream_4)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
pkts = self.pg0.get_capture(len(self.stream_1) + len(self.stream_2))
self.verify_capture(self.pg0, pkts, self.stream_1 + self.stream_2)
def test_4_del_routes(self):
""" Delete 1.5k routes
- delete 5 routes check with traffic script.
- add 100 routes check with traffic script.
"""
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
"10.0.0.0", self.pg0.remote_ip4, 15))
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
"10.0.0.20", self.pg0.remote_ip4, 85))
self.deleted_routes.extend(self.unconfig_fib_many_to_one(
"10.0.1.0", self.pg0.remote_ip4, 100))
fib_dump = self.vapi.ip_fib_dump()
self.verify_not_in_route_dump(fib_dump, self.deleted_routes)
class TestIPNull(VppTestCase):
""" IPv4 routes via NULL """
def setUp(self):
super(TestIPNull, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(1))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
def tearDown(self):
super(TestIPNull, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def test_ip_null(self):
""" IP NULL route """
#
# A route via IP NULL that will reply with ICMP unreachables
#
ip_unreach = VppIpRoute(self, "10.0.0.1", 32, [], is_unreach=1)
ip_unreach.add_vpp_config()
p_unreach = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst="10.0.0.1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg0.add_stream(p_unreach)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx = rx[0]
icmp = rx[ICMP]
self.assertEqual(icmptypes[icmp.type], "dest-unreach")
self.assertEqual(icmpcodes[icmp.type][icmp.code], "host-unreachable")
self.assertEqual(icmp.src, self.pg0.remote_ip4)
self.assertEqual(icmp.dst, "10.0.0.1")
#
# ICMP replies are rate limited. so sit and spin.
#
self.sleep(1)
#
# A route via IP NULL that will reply with ICMP prohibited
#
ip_prohibit = VppIpRoute(self, "10.0.0.2", 32, [], is_prohibit=1)
ip_prohibit.add_vpp_config()
p_prohibit = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst="10.0.0.2") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg0.add_stream(p_prohibit)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx = rx[0]
icmp = rx[ICMP]
self.assertEqual(icmptypes[icmp.type], "dest-unreach")
self.assertEqual(icmpcodes[icmp.type][icmp.code], "host-prohibited")
self.assertEqual(icmp.src, self.pg0.remote_ip4)
self.assertEqual(icmp.dst, "10.0.0.2")
class TestIPDisabled(VppTestCase):
""" IPv4 disabled """
def setUp(self):
super(TestIPDisabled, self).setUp()
# create 2 pg interfaces
self.create_pg_interfaces(range(2))
        # PG0 is IP enabled
self.pg0.admin_up()
self.pg0.config_ip4()
self.pg0.resolve_arp()
# PG 1 is not IP enabled
self.pg1.admin_up()
def tearDown(self):
super(TestIPDisabled, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def test_ip_disabled(self):
""" IP Disabled """
#
# An (S,G).
        # one accepting interface, pg1, one forwarding interface, pg0
#
route_232_1_1_1 = VppIpMRoute(
self,
"0.0.0.0",
"232.1.1.1", 32,
MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE,
[VppMRoutePath(self.pg1.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT),
VppMRoutePath(self.pg0.sw_if_index,
MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)])
route_232_1_1_1.add_vpp_config()
pu = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(src="10.10.10.10", dst=self.pg0.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pm = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(src="10.10.10.10", dst="232.1.1.1") /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, pu, "IP disabled")
self.send_and_assert_no_replies(self.pg1, pm, "IP disabled")
#
# IP enable PG1
#
self.pg1.config_ip4()
#
# Now we get packets through
#
self.pg1.add_stream(pu)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
self.pg1.add_stream(pm)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
#
# Disable PG1
#
self.pg1.unconfig_ip4()
#
# PG1 does not forward IP traffic
#
self.send_and_assert_no_replies(self.pg1, pu, "IP disabled")
self.send_and_assert_no_replies(self.pg1, pm, "IP disabled")
class TestIPSubNets(VppTestCase):
""" IPv4 Subnets """
def setUp(self):
super(TestIPSubNets, self).setUp()
# create a 2 pg interfaces
self.create_pg_interfaces(range(2))
        # pg0 we will use to experiment
self.pg0.admin_up()
# pg1 is setup normally
self.pg1.admin_up()
self.pg1.config_ip4()
self.pg1.resolve_arp()
def tearDown(self):
super(TestIPSubNets, self).tearDown()
for i in self.pg_interfaces:
i.admin_down()
def test_ip_sub_nets(self):
""" IP Sub Nets """
#
# Configure a covering route to forward so we know
# when we are dropping
#
cover_route = VppIpRoute(self, "10.0.0.0", 8,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)])
cover_route.add_vpp_config()
p = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(dst="10.10.10.10", src=self.pg0.local_ip4) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg1.add_stream(p)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
#
# Configure some non-/24 subnets on an IP interface
#
ip_addr_n = socket.inet_pton(socket.AF_INET, "10.10.10.10")
self.vapi.sw_interface_add_del_address(self.pg0.sw_if_index,
ip_addr_n,
16)
pn = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(dst="10.10.0.0", src=self.pg0.local_ip4) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pb = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(dst="10.10.255.255", src=self.pg0.local_ip4) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.send_and_assert_no_replies(self.pg1, pn, "IP Network address")
self.send_and_assert_no_replies(self.pg1, pb, "IP Broadcast address")
# remove the sub-net and we are forwarding via the cover again
self.vapi.sw_interface_add_del_address(self.pg0.sw_if_index,
ip_addr_n,
16,
is_add=0)
self.pg1.add_stream(pn)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
self.pg1.add_stream(pb)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
#
# A /31 is a special case where the 'other-side' is an attached host
# packets to that peer generate ARP requests
#
ip_addr_n = socket.inet_pton(socket.AF_INET, "10.10.10.10")
self.vapi.sw_interface_add_del_address(self.pg0.sw_if_index,
ip_addr_n,
31)
pn = (Ether(src=self.pg1.remote_mac,
dst=self.pg1.local_mac) /
IP(dst="10.10.10.11", src=self.pg0.local_ip4) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
self.pg1.add_stream(pn)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg0.get_capture(1)
rx[ARP]
# remove the sub-net and we are forwarding via the cover again
self.vapi.sw_interface_add_del_address(self.pg0.sw_if_index,
ip_addr_n,
31,
is_add=0)
self.pg1.add_stream(pn)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = self.pg1.get_capture(1)
class TestIPLoadBalance(VppTestCase):
""" IPv4 Load-Balancing """
def setUp(self):
super(TestIPLoadBalance, self).setUp()
self.create_pg_interfaces(range(5))
mpls_tbl = VppMplsTable(self, 0)
mpls_tbl.add_vpp_config()
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
i.enable_mpls()
def tearDown(self):
for i in self.pg_interfaces:
i.disable_mpls()
i.unconfig_ip4()
i.admin_down()
super(TestIPLoadBalance, self).tearDown()
def send_and_expect_load_balancing(self, input, pkts, outputs):
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
for oo in outputs:
rx = oo._get_capture(1)
self.assertNotEqual(0, len(rx))
def send_and_expect_one_itf(self, input, pkts, itf):
input.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
rx = itf.get_capture(len(pkts))
def test_ip_load_balance(self):
""" IP Load-Balancing """
#
# An array of packets that differ only in the destination port
#
port_ip_pkts = []
port_mpls_pkts = []
#
# An array of packets that differ only in the source address
#
src_ip_pkts = []
src_mpls_pkts = []
for ii in range(65):
port_ip_hdr = (IP(dst="10.0.0.1", src="20.0.0.1") /
UDP(sport=1234, dport=1234 + ii) /
Raw('\xa5' * 100))
port_ip_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
port_ip_hdr))
port_mpls_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
MPLS(label=66, ttl=2) /
port_ip_hdr))
src_ip_hdr = (IP(dst="10.0.0.1", src="20.0.0.%d" % ii) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
src_ip_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
src_ip_hdr))
src_mpls_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
MPLS(label=66, ttl=2) /
src_ip_hdr))
route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index),
VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index)])
route_10_0_0_1.add_vpp_config()
binding = VppMplsIpBind(self, 66, "10.0.0.1", 32)
binding.add_vpp_config()
#
# inject the packet on pg0 - expect load-balancing across the 2 paths
# - since the default hash config is to use IP src,dst and port
# src,dst
# We are not going to ensure equal amounts of packets across each link,
# since the hash algorithm is statistical and therefore this can never
        # be guaranteed. But with 64 different packets we do expect some
# balancing. So instead just ensure there is traffic on each link.
#
self.send_and_expect_load_balancing(self.pg0, port_ip_pkts,
[self.pg1, self.pg2])
self.send_and_expect_load_balancing(self.pg0, src_ip_pkts,
[self.pg1, self.pg2])
self.send_and_expect_load_balancing(self.pg0, port_mpls_pkts,
[self.pg1, self.pg2])
self.send_and_expect_load_balancing(self.pg0, src_mpls_pkts,
[self.pg1, self.pg2])
#
# change the flow hash config so it's only IP src,dst
# - now only the stream with differing source address will
# load-balance
#
self.vapi.set_ip_flow_hash(0, src=1, dst=1, sport=0, dport=0)
self.send_and_expect_load_balancing(self.pg0, src_ip_pkts,
[self.pg1, self.pg2])
self.send_and_expect_load_balancing(self.pg0, src_mpls_pkts,
[self.pg1, self.pg2])
self.send_and_expect_one_itf(self.pg0, port_ip_pkts, self.pg2)
#
# change the flow hash config back to defaults
#
self.vapi.set_ip_flow_hash(0, src=1, dst=1, sport=1, dport=1)
#
# Recursive prefixes
        #  - testing that 2 stages of load-balancing occur and there is no
        #    polarisation (i.e. all 4 paths are used, not just 2 of them)
#
port_pkts = []
src_pkts = []
for ii in range(257):
port_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(dst="1.1.1.1", src="20.0.0.1") /
UDP(sport=1234, dport=1234 + ii) /
Raw('\xa5' * 100)))
src_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(dst="1.1.1.1", src="20.0.0.%d" % ii) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100)))
route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
[VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index),
VppRoutePath(self.pg4.remote_ip4,
self.pg4.sw_if_index)])
route_10_0_0_2.add_vpp_config()
route_1_1_1_1 = VppIpRoute(self, "1.1.1.1", 32,
[VppRoutePath("10.0.0.2", 0xffffffff),
VppRoutePath("10.0.0.1", 0xffffffff)])
route_1_1_1_1.add_vpp_config()
#
# inject the packet on pg0 - expect load-balancing across all 4 paths
#
self.vapi.cli("clear trace")
self.send_and_expect_load_balancing(self.pg0, port_pkts,
[self.pg1, self.pg2,
self.pg3, self.pg4])
self.send_and_expect_load_balancing(self.pg0, src_pkts,
[self.pg1, self.pg2,
self.pg3, self.pg4])
#
# Recursive prefixes
        #  - testing 2 stages of load-balancing when there are no path choices
#
port_pkts = []
for ii in range(257):
port_pkts.append((Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(dst="1.1.1.2", src="20.0.0.2") /
UDP(sport=1234, dport=1234 + ii) /
Raw('\xa5' * 100)))
route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
[VppRoutePath(self.pg3.remote_ip4,
self.pg3.sw_if_index)])
route_10_0_0_3.add_vpp_config()
route_1_1_1_2 = VppIpRoute(self, "1.1.1.2", 32,
[VppRoutePath("10.0.0.3", 0xffffffff)])
route_1_1_1_2.add_vpp_config()
#
        # inject the packet on pg0 - expect all packets to arrive via the single path on pg3
#
self.vapi.cli("clear trace")
self.send_and_expect_one_itf(self.pg0, port_pkts, self.pg3)
class TestIPVlan0(VppTestCase):
""" IPv4 VLAN-0 """
def setUp(self):
super(TestIPVlan0, self).setUp()
self.create_pg_interfaces(range(2))
mpls_tbl = VppMplsTable(self, 0)
mpls_tbl.add_vpp_config()
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
i.enable_mpls()
def tearDown(self):
for i in self.pg_interfaces:
i.disable_mpls()
i.unconfig_ip4()
i.admin_down()
super(TestIPVlan0, self).tearDown()
def test_ip_vlan_0(self):
""" IP VLAN-0 """
pkts = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
Dot1Q(vlan=0) /
IP(dst=self.pg1.remote_ip4,
src=self.pg0.remote_ip4) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100)) * 65
#
# Expect that packets sent on VLAN-0 are forwarded on the
# main interface.
#
self.send_and_expect(self.pg0, pkts, self.pg1)
class TestIPPunt(VppTestCase):
""" IPv4 Punt Police/Redirect """
def setUp(self):
super(TestIPPunt, self).setUp()
self.create_pg_interfaces(range(2))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
def tearDown(self):
super(TestIPPunt, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def test_ip_punt(self):
""" IP punt police and redirect """
p = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
TCP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkts = p * 1025
#
# Configure a punt redirect via pg1.
#
nh_addr = socket.inet_pton(socket.AF_INET,
self.pg1.remote_ip4)
self.vapi.ip_punt_redirect(self.pg0.sw_if_index,
self.pg1.sw_if_index,
nh_addr)
self.send_and_expect(self.pg0, pkts, self.pg1)
#
# add a policer
#
policer = self.vapi.policer_add_del("ip4-punt", 400, 0, 10, 0,
rate_type=1)
self.vapi.ip_punt_police(policer.policer_index)
self.vapi.cli("clear trace")
self.pg0.add_stream(pkts)
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
#
        # the number of packets received should be greater than 0,
# but not equal to the number sent, since some were policed
#
rx = self.pg1._get_capture(1)
self.assertTrue(len(rx) > 0)
self.assertTrue(len(rx) < len(pkts))
#
        # remove the policer. back to full rx
#
self.vapi.ip_punt_police(policer.policer_index, is_add=0)
self.vapi.policer_add_del("ip4-punt", 400, 0, 10, 0,
rate_type=1, is_add=0)
self.send_and_expect(self.pg0, pkts, self.pg1)
#
# remove the redirect. expect full drop.
#
self.vapi.ip_punt_redirect(self.pg0.sw_if_index,
self.pg1.sw_if_index,
nh_addr,
is_add=0)
self.send_and_assert_no_replies(self.pg0, pkts,
"IP no punt config")
#
# Add a redirect that is not input port selective
#
self.vapi.ip_punt_redirect(0xffffffff,
self.pg1.sw_if_index,
nh_addr)
self.send_and_expect(self.pg0, pkts, self.pg1)
self.vapi.ip_punt_redirect(0xffffffff,
self.pg1.sw_if_index,
nh_addr,
is_add=0)
class TestIPDeag(VppTestCase):
""" IPv4 Deaggregate Routes """
def setUp(self):
super(TestIPDeag, self).setUp()
self.create_pg_interfaces(range(3))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
def tearDown(self):
super(TestIPDeag, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def test_ip_deag(self):
""" IP Deag Routes """
#
# Create a table to be used for:
# 1 - another destination address lookup
# 2 - a source address lookup
#
table_dst = VppIpTable(self, 1)
table_src = VppIpTable(self, 2)
table_dst.add_vpp_config()
table_src.add_vpp_config()
#
# Add a route in the default table to point to a deag/
# second lookup in each of these tables
#
route_to_dst = VppIpRoute(self, "1.1.1.1", 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=1)])
route_to_src = VppIpRoute(self, "1.1.1.2", 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
nh_table_id=2,
is_source_lookup=1)])
route_to_dst.add_vpp_config()
route_to_src.add_vpp_config()
#
# packets to these destination are dropped, since they'll
# hit the respective default routes in the second table
#
p_dst = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="5.5.5.5", dst="1.1.1.1") /
TCP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
p_src = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src="2.2.2.2", dst="1.1.1.2") /
TCP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
pkts_dst = p_dst * 257
pkts_src = p_src * 257
self.send_and_assert_no_replies(self.pg0, pkts_dst,
"IP in dst table")
self.send_and_assert_no_replies(self.pg0, pkts_src,
"IP in src table")
#
# add a route in the dst table to forward via pg1
#
route_in_dst = VppIpRoute(self, "1.1.1.1", 32,
[VppRoutePath(self.pg1.remote_ip4,
self.pg1.sw_if_index)],
table_id=1)
route_in_dst.add_vpp_config()
self.send_and_expect(self.pg0, pkts_dst, self.pg1)
#
# add a route in the src table to forward via pg2
#
route_in_src = VppIpRoute(self, "2.2.2.2", 32,
[VppRoutePath(self.pg2.remote_ip4,
self.pg2.sw_if_index)],
table_id=2)
route_in_src.add_vpp_config()
self.send_and_expect(self.pg0, pkts_src, self.pg2)
class TestIPInput(VppTestCase):
""" IPv4 Input Exceptions """
def setUp(self):
super(TestIPInput, self).setUp()
self.create_pg_interfaces(range(2))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
def tearDown(self):
super(TestIPInput, self).tearDown()
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
def test_ip_input(self):
""" IP Input Exceptions """
        # I can't find a way in scapy to construct an IP packet
# with a length less than the IP header length
#
# Packet too short - this is forwarded
#
p_short = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
len=40) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_short * 65, self.pg1)
#
# Packet too long - this is dropped
#
p_long = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
len=400) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_assert_no_replies(self.pg0, p_long * 65,
"too long")
#
# bad chksum - this is dropped
#
p_chksum = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
chksum=400) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_assert_no_replies(self.pg0, p_chksum * 65,
"bad checksum")
#
# bad version - this is dropped
#
p_ver = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
version=3) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_assert_no_replies(self.pg0, p_ver * 65,
"funky version")
#
# fragment offset 1 - this is dropped
#
p_frag = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
frag=1) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_assert_no_replies(self.pg0, p_frag * 65,
"frag offset")
#
# TTL expired packet
#
p_ttl = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
ttl=1) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 100))
rx = self.send_and_expect(self.pg0, p_ttl * 65, self.pg0)
rx = rx[0]
icmp = rx[ICMP]
self.assertEqual(icmptypes[icmp.type], "time-exceeded")
self.assertEqual(icmpcodes[icmp.type][icmp.code],
"ttl-zero-during-transit")
self.assertEqual(icmp.src, self.pg0.remote_ip4)
self.assertEqual(icmp.dst, self.pg1.remote_ip4)
#
# MTU exceeded
#
p_mtu = (Ether(src=self.pg0.remote_mac,
dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4,
dst=self.pg1.remote_ip4,
ttl=10) /
UDP(sport=1234, dport=1234) /
Raw('\xa5' * 2000))
self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, 1500)
rx = self.send_and_expect(self.pg0, p_mtu * 65, self.pg0)
rx = rx[0]
icmp = rx[ICMP]
self.assertEqual(icmptypes[icmp.type], "dest-unreach")
self.assertEqual(icmpcodes[icmp.type][icmp.code],
"fragmentation-needed")
self.assertEqual(icmp.src, self.pg0.remote_ip4)
self.assertEqual(icmp.dst, self.pg1.remote_ip4)
self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, 2500)
rx = self.send_and_expect(self.pg0, p_mtu * 65, self.pg1)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
the-stack_0_18728 | from django.shortcuts import reverse
from questionbank.questions.mixins import ChoiceFormMixin
def test_choice_form_mixin():
"""
calling ChoiceFormMixin.get_success_url() should redirect to question
detail page
"""
mixin = ChoiceFormMixin()
mixin.kwargs = {'question': 1}
url = mixin.get_success_url()
assert url == reverse('questions:detail', kwargs={'pk': 1})
|
the-stack_0_18729 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import stat
import subprocess
import jinja2
from oslo_config import cfg
from oslo_log import log as logging
from octavia.amphorae.backends.agent.api_server import osutils
from octavia.amphorae.backends.utils import ip_advertisement
from octavia.amphorae.backends.utils import network_utils
from octavia.common import constants as consts
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
FRONTEND_BACKEND_PATTERN = re.compile(r'\n(frontend|backend)\s+(\S+)\n')
LISTENER_MODE_PATTERN = re.compile(r'^\s+mode\s+(.*)$', re.MULTILINE)
TLS_CERT_PATTERN = re.compile(r'^\s+bind\s+\S+\s+ssl crt-list\s+(\S*)',
re.MULTILINE)
STATS_SOCKET_PATTERN = re.compile(r'stats socket\s+(\S+)')
class ParsingError(Exception):
pass
class UnknownInitError(Exception):
pass
def init_path(lb_id, init_system):
if init_system == consts.INIT_SYSTEMD:
return os.path.join(consts.SYSTEMD_DIR,
'haproxy-{0}.service'.format(lb_id))
if init_system == consts.INIT_UPSTART:
return os.path.join(consts.UPSTART_DIR,
'haproxy-{0}.conf'.format(lb_id))
if init_system == consts.INIT_SYSVINIT:
return os.path.join(consts.SYSVINIT_DIR,
'haproxy-{0}'.format(lb_id))
raise UnknownInitError()
def keepalived_lvs_dir():
return os.path.join(CONF.haproxy_amphora.base_path, 'lvs')
def keepalived_lvs_init_path(init_system, listener_id):
if init_system == consts.INIT_SYSTEMD:
return os.path.join(consts.SYSTEMD_DIR,
consts.KEEPALIVED_SYSTEMD_PREFIX %
str(listener_id))
if init_system == consts.INIT_UPSTART:
return os.path.join(consts.UPSTART_DIR,
consts.KEEPALIVED_UPSTART_PREFIX %
str(listener_id))
if init_system == consts.INIT_SYSVINIT:
return os.path.join(consts.SYSVINIT_DIR,
consts.KEEPALIVED_SYSVINIT_PREFIX %
str(listener_id))
raise UnknownInitError()
def keepalived_backend_check_script_dir():
return os.path.join(CONF.haproxy_amphora.base_path, 'lvs/check/')
def keepalived_backend_check_script_path():
return os.path.join(keepalived_backend_check_script_dir(),
'udp_check.sh')
def keepalived_lvs_pids_path(listener_id):
pids_path = {}
for file_ext in ['pid', 'vrrp.pid', 'check.pid']:
pids_path[file_ext] = (
os.path.join(CONF.haproxy_amphora.base_path,
('lvs/octavia-keepalivedlvs-%s.%s') %
(str(listener_id), file_ext)))
return pids_path['pid'], pids_path['vrrp.pid'], pids_path['check.pid']
def keepalived_lvs_cfg_path(listener_id):
return os.path.join(CONF.haproxy_amphora.base_path,
('lvs/octavia-keepalivedlvs-%s.conf') %
str(listener_id))
def haproxy_dir(lb_id):
return os.path.join(CONF.haproxy_amphora.base_path, lb_id)
def pid_path(lb_id):
return os.path.join(haproxy_dir(lb_id), lb_id + '.pid')
def config_path(lb_id):
return os.path.join(haproxy_dir(lb_id), 'haproxy.cfg')
def get_haproxy_pid(lb_id):
with open(pid_path(lb_id), 'r') as f:
return f.readline().rstrip()
def get_keepalivedlvs_pid(listener_id):
pid_file = keepalived_lvs_pids_path(listener_id)[0]
with open(pid_file, 'r') as f:
return f.readline().rstrip()
def haproxy_sock_path(lb_id):
return os.path.join(CONF.haproxy_amphora.base_path, lb_id + '.sock')
def haproxy_check_script_path():
return os.path.join(keepalived_check_scripts_dir(),
'haproxy_check_script.sh')
def keepalived_dir():
return os.path.join(CONF.haproxy_amphora.base_path, 'vrrp')
def keepalived_init_path(init_system):
if init_system == consts.INIT_SYSTEMD:
return os.path.join(consts.SYSTEMD_DIR, consts.KEEPALIVED_SYSTEMD)
if init_system == consts.INIT_UPSTART:
return os.path.join(consts.UPSTART_DIR, consts.KEEPALIVED_UPSTART)
if init_system == consts.INIT_SYSVINIT:
return os.path.join(consts.SYSVINIT_DIR, consts.KEEPALIVED_SYSVINIT)
raise UnknownInitError()
def keepalived_pid_path():
return os.path.join(CONF.haproxy_amphora.base_path,
'vrrp/octavia-keepalived.pid')
def keepalived_cfg_path():
return os.path.join(CONF.haproxy_amphora.base_path,
'vrrp/octavia-keepalived.conf')
def keepalived_log_path():
return os.path.join(CONF.haproxy_amphora.base_path,
'vrrp/octavia-keepalived.log')
def keepalived_check_scripts_dir():
return os.path.join(CONF.haproxy_amphora.base_path,
'vrrp/check_scripts')
def keepalived_check_script_path():
return os.path.join(CONF.haproxy_amphora.base_path,
'vrrp/check_script.sh')
def get_listeners():
"""Get Listeners
:returns: An array with the ids of all listeners, e.g. ['123', '456', ...]
or [] if no listeners exist
"""
listeners = []
for lb_id in get_loadbalancers():
listeners_on_lb = parse_haproxy_file(lb_id)[1]
listeners.extend(list(listeners_on_lb.keys()))
return listeners
def get_loadbalancers():
"""Get Load balancers
:returns: An array with the uuids of all load balancers,
e.g. ['123', '456', ...] or [] if no loadbalancers exist
"""
if os.path.exists(CONF.haproxy_amphora.base_path):
return [f for f in os.listdir(CONF.haproxy_amphora.base_path)
if os.path.exists(config_path(f))]
return []
def is_lb_running(lb_id):
return os.path.exists(pid_path(lb_id)) and os.path.exists(
os.path.join('/proc', get_haproxy_pid(lb_id)))
def get_udp_listeners():
result = []
if os.path.exists(keepalived_lvs_dir()):
for f in os.listdir(keepalived_lvs_dir()):
if f.endswith('.conf'):
prefix = f.split('.')[0]
if re.search("octavia-keepalivedlvs-", prefix):
result.append(f.split(
'octavia-keepalivedlvs-')[1].split('.')[0])
return result
def is_udp_listener_running(listener_id):
pid_file = keepalived_lvs_pids_path(listener_id)[0]
return os.path.exists(pid_file) and os.path.exists(
os.path.join('/proc', get_keepalivedlvs_pid(listener_id)))
def get_os_init_system():
if os.path.exists(consts.INIT_PROC_COMM_PATH):
with open(consts.INIT_PROC_COMM_PATH, 'r') as init_comm:
init_proc_name = init_comm.read().rstrip('\n')
if init_proc_name == consts.INIT_SYSTEMD:
return consts.INIT_SYSTEMD
if init_proc_name == 'init':
init_path = consts.INIT_PATH
if os.path.exists(init_path):
args = [init_path, '--version']
init_version = subprocess.check_output(args, shell=False)
if consts.INIT_UPSTART in str(init_version, 'utf-8'):
return consts.INIT_UPSTART
return consts.INIT_SYSVINIT
return consts.INIT_UNKOWN
def install_netns_systemd_service():
os_utils = osutils.BaseOS.get_os_util()
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
# mode 00644
mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
# TODO(bcafarel): implement this for other init systems
# netns handling depends on a separate unit file
netns_path = os.path.join(consts.SYSTEMD_DIR,
consts.AMP_NETNS_SVC_PREFIX + '.service')
jinja_env = jinja2.Environment(
autoescape=True, loader=jinja2.FileSystemLoader(os.path.dirname(
os.path.realpath(__file__)
) + consts.AGENT_API_TEMPLATES))
if not os.path.exists(netns_path):
with os.fdopen(os.open(netns_path, flags, mode), 'w') as text_file:
text = jinja_env.get_template(
consts.AMP_NETNS_SVC_PREFIX + '.systemd.j2').render(
amphora_nsname=consts.AMPHORA_NAMESPACE,
HasIFUPAll=os_utils.has_ifup_all())
text_file.write(text)
def run_systemctl_command(command, service):
cmd = "systemctl {cmd} {srvc}".format(cmd=command, srvc=service)
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.error("Failed to %(cmd)s %(srvc)s service: "
"%(err)s %(out)s", {'cmd': command, 'srvc': service,
'err': e, 'out': e.output})
def get_protocol_for_lb_object(object_id):
"""Returns the L4 protocol for a listener.
If the listener is a TCP based listener (haproxy) return TCP.
If the listener is a UDP based listener (lvs) return UDP.
If the listener is not identifiable, return None.
:param listener_id: The ID of the listener to identify.
:returns: TCP, UDP, or None
"""
if os.path.exists(config_path(object_id)):
return consts.PROTOCOL_TCP
if os.path.exists(keepalived_lvs_cfg_path(object_id)):
return consts.PROTOCOL_UDP
return None
def parse_haproxy_file(lb_id):
with open(config_path(lb_id), 'r') as file:
cfg = file.read()
listeners = {}
m = FRONTEND_BACKEND_PATTERN.split(cfg)
last_token = None
last_id = None
for section in m:
if last_token is None:
# We aren't in a section yet, see if this line starts one
if section == 'frontend':
last_token = section
elif last_token == 'frontend':
# We're in a frontend section, save the id for later
last_token = last_token + "_id"
last_id = section
elif last_token == 'frontend_id':
# We're in a frontend section and already have the id
# Look for the mode
mode_matcher = LISTENER_MODE_PATTERN.search(section)
if not mode_matcher:
raise ParsingError()
listeners[last_id] = {
'mode': mode_matcher.group(1).upper(),
}
# Now see if this is a TLS frontend
tls_matcher = TLS_CERT_PATTERN.search(section)
if tls_matcher:
# TODO(rm_work): Can't we have terminated tcp?
listeners[last_id]['mode'] = 'TERMINATED_HTTPS'
listeners[last_id]['ssl_crt'] = tls_matcher.group(1)
# Clear out the token and id and start over
last_token = last_id = None
m = STATS_SOCKET_PATTERN.search(cfg)
if not m:
raise ParsingError()
stats_socket = m.group(1)
return stats_socket, listeners
def vrrp_check_script_update(lb_id, action):
os.makedirs(keepalived_dir(), exist_ok=True)
os.makedirs(keepalived_check_scripts_dir(), exist_ok=True)
lb_ids = get_loadbalancers()
udp_ids = get_udp_listeners()
    # If no LBs are found, make sure keepalived thinks haproxy is down.
if not lb_ids:
if not udp_ids:
with open(haproxy_check_script_path(), 'w') as text_file:
text_file.write('exit 1')
else:
try:
LOG.debug("Attempting to remove old haproxy check script...")
os.remove(haproxy_check_script_path())
LOG.debug("Finished removing old haproxy check script.")
except FileNotFoundError:
LOG.debug("No haproxy check script to remove.")
return
if action == consts.AMP_ACTION_STOP:
lb_ids.remove(lb_id)
args = []
for lbid in lb_ids:
args.append(haproxy_sock_path(lbid))
cmd = 'haproxy-vrrp-check {args}; exit $?'.format(args=' '.join(args))
with open(haproxy_check_script_path(), 'w') as text_file:
text_file.write(cmd)
def get_haproxy_vip_addresses(lb_id):
"""Get the VIP addresses for a load balancer.
:param lb_id: The load balancer ID to get VIP addresses from.
:returns: List of VIP addresses (IPv4 and IPv6)
"""
vips = []
with open(config_path(lb_id), 'r') as file:
for line in file:
current_line = line.strip()
if current_line.startswith('bind'):
for section in current_line.split(' '):
# We will always have a port assigned per the template.
if ':' in section:
if ',' in section:
addr_port = section.rstrip(',')
vips.append(addr_port.rpartition(':')[0])
else:
vips.append(section.rpartition(':')[0])
break
return vips
def send_vip_advertisements(lb_id):
"""Sends address advertisements for each load balancer VIP.
This method will send either GARP (IPv4) or neighbor advertisements (IPv6)
for the VIP addresses on a load balancer.
:param lb_id: The load balancer ID to send advertisements for.
:returns: None
"""
try:
vips = get_haproxy_vip_addresses(lb_id)
for vip in vips:
interface = network_utils.get_interface_name(
vip, net_ns=consts.AMPHORA_NAMESPACE)
ip_advertisement.send_ip_advertisement(
interface, vip, net_ns=consts.AMPHORA_NAMESPACE)
except Exception as e:
        LOG.debug('Send VIP advertisement failed due to: %s. '
'This amphora may not be the MASTER. Ignoring.', str(e))
|
the-stack_0_18733 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import logging
from common import _utils
def create_parser():
parser = argparse.ArgumentParser(description='SageMaker Training Job')
_utils.add_default_client_arguments(parser)
parser.add_argument('--endpoint_config_name', type=str, required=False, help='The name of the endpoint configuration.', default='')
parser.add_argument('--variant_name_1', type=str, required=False, help='The name of the production variant.', default='variant-name-1')
parser.add_argument('--model_name_1', type=str, required=True, help='The model name used for endpoint deployment.')
parser.add_argument('--initial_instance_count_1', type=int, required=False, help='Number of instances to launch initially.', default=1)
parser.add_argument('--instance_type_1', type=str, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--initial_variant_weight_1', type=float, required=False, help='Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.', default=1.0)
parser.add_argument('--accelerator_type_1', choices=['ml.eia1.medium', 'ml.eia1.large', 'ml.eia1.xlarge', ''], type=str, required=False, help='The size of the Elastic Inference (EI) instance to use for the production variant.', default='')
parser.add_argument('--variant_name_2', type=str, required=False, help='The name of the production variant.', default='variant-name-2')
parser.add_argument('--model_name_2', type=str, required=False, help='The model name used for endpoint deployment.', default='')
parser.add_argument('--initial_instance_count_2', type=int, required=False, help='Number of instances to launch initially.', default=1)
parser.add_argument('--instance_type_2', type=str, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--initial_variant_weight_2', type=float, required=False, help='Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.', default=1.0)
parser.add_argument('--accelerator_type_2', choices=['ml.eia1.medium', 'ml.eia1.large', 'ml.eia1.xlarge', ''], type=str, required=False, help='The size of the Elastic Inference (EI) instance to use for the production variant.', default='')
parser.add_argument('--variant_name_3', type=str, required=False, help='The name of the production variant.', default='variant-name-3')
parser.add_argument('--model_name_3', type=str, required=False, help='The model name used for endpoint deployment.', default='')
parser.add_argument('--initial_instance_count_3', type=int, required=False, help='Number of instances to launch initially.', default=1)
parser.add_argument('--instance_type_3', type=str, required=False, help='The ML compute instance type.', default='ml.m4.xlarge')
parser.add_argument('--initial_variant_weight_3', type=float, required=False, help='Determines initial traffic distribution among all of the models that you specify in the endpoint configuration.', default=1.0)
parser.add_argument('--accelerator_type_3', choices=['ml.eia1.medium', 'ml.eia1.large', 'ml.eia1.xlarge', ''], type=str, required=False, help='The size of the Elastic Inference (EI) instance to use for the production variant.', default='')
parser.add_argument('--resource_encryption_key', type=str, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--endpoint_config_tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs, to categorize AWS resources.', default={})
parser.add_argument('--endpoint_name', type=str, required=False, help='The name of the endpoint.', default='')
parser.add_argument('--endpoint_tags', type=_utils.yaml_or_json_str, required=False, help='An array of key-value pairs, to categorize AWS resources.', default={})
parser.add_argument('--endpoint_name_output_path', type=str, default='/tmp/endpoint-name', help='Local output path for the file containing the name of the created endpoint.')
return parser
def main(argv=None):
parser = create_parser()
args = parser.parse_args(argv)
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_sagemaker_client(args.region, args.endpoint_url)
logging.info('Submitting Endpoint request to SageMaker...')
endpoint_name = _utils.deploy_model(client, vars(args))
logging.info('Endpoint creation request submitted. Waiting for completion...')
_utils.wait_for_endpoint_creation(client, endpoint_name)
_utils.write_output(args.endpoint_name_output_path, endpoint_name)
logging.info('Endpoint creation completed.')
if __name__== "__main__":
main(sys.argv[1:])
|