460317
|
from logging import getLogger
def build_models_dict(annotated_models):
"""
Takes a list of annotated genetic inheritance patterns, one per family,
and returns a dictionary with family_id as key and a list of
genetic models as value.
Args:
annotated_models : A list of the form ['1:AD', '2:AR_comp|AD_dn']
Returns:
parsed_models : A dictionary of the form
{
'1': ['AD'],
'2': ['AR_comp', 'AD_dn']
}
"""
logger = getLogger(__name__)
logger.debug("Parsing models {0}".format(annotated_models)
)
parsed_models = {}
for family_annotation in annotated_models:
family_id = family_annotation.split(':')[0]
logger.debug("Parsing family {0}".format(family_id))
models = family_annotation.split(':')[1].split('|')
parsed_models[family_id] = models
logger.debug("Adding models {0}".format(models))
return parsed_models
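# Example usage (sketch), mirroring the docstring example:
# build_models_dict(['1:AD', '2:AR_comp|AD_dn'])
# -> {'1': ['AD'], '2': ['AR_comp', 'AD_dn']}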
|
460355
|
import network
import numpy as np
import tensorflow as tf
import model3 as M
import data_reader
def grad_loss(x, model):
data, label = x
with tf.GradientTape() as tape:
out = model(data)
loss = tf.reduce_mean(tf.square(out - label))
print(tf.reduce_max(out), tf.reduce_min(out))
grads = tape.gradient(loss, model.trainable_variables)
return grads, [loss]
tf.keras.backend.set_learning_phase(False)
net = network.PosePredNet(19)
M.Saver(net.backbone).restore('./posedetnet/')
M.Saver(net.head).restore('./posedetnet/')
optim = tf.optimizers.Adam(0.0001)
saver = M.Saver(net)
saver.restore('./model/')
# initialize
_ = np.zeros([1,256,256,3]).astype(np.float32)
net(_)
# start training
reader = data_reader.data_reader(16)
MAX_ITER = 100000
for i in range(MAX_ITER+1):
batch = reader.get_next()
grads, lss = grad_loss(batch, net)
optim.apply_gradients(M.zip_grad(grads, net.trainable_variables))
if i%10==0:
print('Iter:%d\tLoss:%.4f'%(i, lss[0]))
if i%100==0 and i>0:
saver.save('./model/model.ckpt')
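# Note: M.zip_grad is assumed to pair each gradient with its variable; the plain
# TensorFlow 2 idiom for the same update step would be
#   optim.apply_gradients(zip(grads, net.trainable_variables))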
|
460368
|
import torch
from torch.utils import data
from Datasets.dataset_semantic_SHAB import Dataset as full_supervise_Dataset
from Models.model import Model
import os
import argparse
import matplotlib.pyplot as plt
import torch.nn.functional as F
import numpy as np
checkpoint_logs_name = 'SHA'
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=1337, help='random seed')
parser.add_argument('--dataset', default='SHA', type=str, help='dataset') # SHB
parser.add_argument('--data_path', default='./Data_Crowd_Counting/ShanghaiTech_Crowd_Counting_Dataset/', type=str, help='path to dataset')
parser.add_argument('--load', default=True, action='store_true', help='load checkpoint')
parser.add_argument('--save_path', default='./checkpoints/' + checkpoint_logs_name, type=str, help='path to save checkpoint') # seman_SHB
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
args = parser.parse_args()
def normalize(image, MIN_BOUND, MAX_BOUND):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
reverse_image = 1 - image
return reverse_image
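# Note: normalize() rescales to [0, 1] using the given bounds and then inverts,
# so values at MIN_BOUND map to 1 and values at MAX_BOUND map to 0.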
test_dataset = full_supervise_Dataset(args.data_path, args.dataset, False)
test_loader = data.DataLoader(test_dataset, batch_size=1, shuffle=False)
device = torch.device('cuda:' + str(args.gpu))
def create_model(ema=False):
# Network definition
net = Model()
model = net.to(device)
if ema:
for param in model.parameters():
param.detach_()
return model
model = create_model()
if args.load:
checkpoint = torch.load(os.path.join(args.save_path, 'checkpoint_best.pth'))
model.load_state_dict(checkpoint['model'])
iter_num = 0
model.eval()
print('start validation')
with torch.no_grad():
mae, mse = 0.0, 0.0
for i, (image, gt, den_val_gt, att_val_gt) in enumerate(test_loader):
image = image.to(device)
predict, dmp_to_att_val, seg_val = model(image)
# unc
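# Uncertainty estimation: the block below perturbs the input T times with small
# clipped Gaussian noise, runs the segmentation head on each noisy copy, averages
# the softmax outputs over the T passes, and uses the per-pixel predictive entropy
# of that mean as the uncertainty map.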
T = 8
volume_batch_r = image.repeat(2, 1, 1, 1)
stride = volume_batch_r.shape[0] // 2
preds = torch.zeros([stride * T, 2, image.shape[2], image.shape[3]]).cuda()
for t in range(T // 2):
ema_inputs = volume_batch_r + torch.clamp(torch.randn_like(volume_batch_r) * 0.1, -0.2, 0.2)
with torch.no_grad():
_, _, ema_seg = model(ema_inputs)
preds[2 * stride * t:2 * stride * (t + 1)] = ema_seg
preds = F.softmax(preds, dim=1)
preds = preds.reshape(T, stride, 2, image.shape[2], image.shape[3])
preds = torch.mean(preds, dim=0) # (batch /2, 1, 128, 128)
uncertainty = -1.0 * torch.sum(preds * torch.log(preds + 1e-6), dim=1, keepdim=True) # (batch/2, 1, 128, 128)
uncertainty_norm = normalize(uncertainty, 0, np.log(2)) * 7
mae += abs(predict.sum().item() - den_val_gt.sum().item())  # .item() first so the GPU prediction and CPU ground truth can be compared
mse += (predict.sum().item() - den_val_gt.sum().item()) ** 2
# save GT
save_img = np.transpose(image.cpu().numpy().squeeze(), [1, 2, 0]) * 0.2 + 0.45
density_gt = den_val_gt.cpu().numpy().squeeze().astype('float32')
attention_gt = att_val_gt.cpu().numpy().squeeze()
# density
save_pre_den = predict.data
save_pre_den = save_pre_den.cpu().numpy().squeeze().astype('float32')
# dmp_to_seg
save_pre_dmp_to_att = dmp_to_att_val.data
save_pre_dmp_to_att[save_pre_dmp_to_att >= 0.5] = 1.0
save_pre_dmp_to_att[save_pre_dmp_to_att < 0.5] = 0.0
save_pre_dmp_to_att = save_pre_dmp_to_att.cpu().numpy().squeeze() # .astype('uint8')
# seg
save_pre_att_2 = seg_val.data
save_pre_att_2 = save_pre_att_2.cpu().numpy().squeeze().astype('uint8')
save_pre_att_2 = np.transpose(save_pre_att_2, [1, 2, 0])
save_pre_att_2 = np.argmin(save_pre_att_2, axis=2)
# unc
uncertainty = uncertainty.cpu().numpy().squeeze().astype('float32')
uncertainty = uncertainty * (uncertainty > 0.5)
uncertainty_norm = uncertainty_norm.cpu().numpy().squeeze().astype('float32')
plt.figure()
plt.subplot(1, 6, 1)
plt.imshow(save_pre_den)
plt.subplot(1, 6, 2)
plt.imshow(density_gt)
plt.subplot(1, 6, 3)
plt.imshow(save_pre_dmp_to_att)
plt.subplot(1, 6, 4)
plt.imshow(save_pre_att_2)
plt.subplot(1, 6, 5)
plt.imshow(uncertainty, cmap='inferno')
plt.subplot(1, 6, 6)
plt.imshow(uncertainty_norm, cmap='inferno')
plt.show()
mae /= len(test_loader)
mse /= len(test_loader)
mse = mse ** 0.5
print('MAE:', mae, 'MSE:', mse)
|
460384
|
from django.test import TestCase
from constance import settings
from tests.storage import StorageTestsMixin
class TestMemory(StorageTestsMixin, TestCase):
def setUp(self):
self.old_backend = settings.BACKEND
settings.BACKEND = 'constance.backends.memory.MemoryBackend'
super().setUp()
self.config._backend._storage = {}
def tearDown(self):
self.config._backend._storage = {}
settings.BACKEND = self.old_backend
|
460405
|
from pydantic import conint, constr
hexstr = constr(regex=r'^[0-9a-f]+$', strict=True)
hexstr_i = constr(regex=r'^[0-9a-fA-F]+$', strict=True) # case-insensitive hexstr
non_negative_intstr = constr(regex=r'^(?:0|[1-9][0-9]*)$', strict=True)
non_negative_int = conint(ge=0, strict=True)
positive_int = conint(gt=0, strict=True)
class hexstr64(hexstr): # type: ignore
min_length = 64
max_length = 64
class hexstr64_i(hexstr_i): # type: ignore
min_length = 64
max_length = 64
class hexstr128(hexstr): # type: ignore
min_length = 128
max_length = 128
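# Example usage (a sketch assuming pydantic v1; the model and field names below
# are hypothetical):
# from pydantic import BaseModel
# class Transaction(BaseModel):
#     tx_hash: hexstr64
#     amount: positive_int
# Transaction(tx_hash='ab' * 32, amount=1)   # passes validation
# Transaction(tx_hash='xyz', amount=0)       # raises ValidationError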
|
460406
|
import logging
from matplotlib.colors import LinearSegmentedColormap
from typing import Sequence, Callable
import matplotlib.figure
from matplotlib import pyplot as plt
import numpy as np
log = logging.getLogger(__name__)
def plotMatrix(matrix, title, xticklabels: Sequence[str], yticklabels: Sequence[str], xlabel: str, ylabel: str, normalize=True, figsize=(9,9),
titleAdd: str = None) -> matplotlib.figure.Figure:
"""
:param matrix: matrix whose data to plot, where matrix[i, j] will be rendered at x=i, y=j
:param title: the plot's title
:param xticklabels: the labels for the x-axis ticks
:param yticklabels: the labels for the y-axis ticks
:param xlabel: the label for the x-axis
:param ylabel: the label for the y-axis
:param normalize: whether to normalise the matrix before plotting it (dividing each entry by the sum of all entries)
:param titleAdd: an optional second line to add to the title
:return: the figure object
"""
matrix = np.transpose(matrix)
if titleAdd is not None:
title += f"\n {titleAdd} "
if normalize:
matrix = matrix.astype('float') / matrix.sum()
fig, ax = plt.subplots(figsize=figsize)
fig.canvas.manager.set_window_title(title.replace("\n", " "))
# We want to show all ticks...
ax.set(xticks=np.arange(matrix.shape[1]),
yticks=np.arange(matrix.shape[0]),
# ... and label them with the respective list entries
xticklabels=xticklabels, yticklabels=yticklabels,
title=title,
xlabel=xlabel,
ylabel=ylabel)
im = ax.imshow(matrix, interpolation='nearest', cmap=plt.cm.Blues)
ax.figure.colorbar(im, ax=ax)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else ('.2f' if np.issubdtype(matrix.dtype, np.floating) else 'd')
thresh = matrix.max() / 2.
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
ax.text(j, i, format(matrix[i, j], fmt),
ha="center", va="center",
color="white" if matrix[i, j] > thresh else "black")
fig.tight_layout()
return fig
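# Example usage (sketch): rendering a small 2x2 count matrix with labelled axes.
# fig = plotMatrix(np.array([[5, 1], [2, 7]]), "Confusion matrix",
#     xticklabels=["neg", "pos"], yticklabels=["neg", "pos"],
#     xlabel="predicted", ylabel="actual", normalize=False)
# plt.show()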
class Plot:
def __init__(self, draw: Callable[[], plt.Axes] = None, name=None):
"""
:param draw: function which returns a matplotlib.Axes object to show
:param name: name/number of the figure, which determines the window caption; it should be unique, as any plot
with the same name will have its contents rendered in the same window. By default, figures are numbered
sequentially.
"""
self.fig: matplotlib.figure.Figure = plt.figure(name)
self.ax = draw()
def xlabel(self, label):
plt.xlabel(label)
return self
def ylabel(self, label):
plt.ylabel(label)
return self
def save(self, path):
log.info(f"Saving figure in {path}")
self.fig.savefig(path)
class ScatterPlot(Plot):
def __init__(self, x, y, c=((0, 0, 1, 0.05),), x_label=None, y_label=None, **kwargs):
assert len(x) == len(y)
if x_label is None and hasattr(x, "name"):
x_label = x.name
if y_label is None and hasattr(y, "name"):
y_label = y.name
def draw():
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
return plt.scatter(x, y, c=c, **kwargs)
super().__init__(draw)
class HeatMapPlot(Plot):
DEFAULT_CMAP_FACTORY = lambda numPoints: LinearSegmentedColormap.from_list("whiteToRed", ((0, (1, 1, 1)), (1/numPoints, (1, 0.96, 0.96)), (1, (0.7, 0, 0))), numPoints)
def __init__(self, x, y, xLabel=None, yLabel=None, bins=60, cmap=None, commonRange=True, diagonal=False,
diagonalColor="green", **kwargs):
assert len(x) == len(y)
if xLabel is None and hasattr(x, "name"):
xLabel = x.name
if yLabel is None and hasattr(y, "name"):
yLabel = y.name
def draw():
nonlocal cmap
x_range = [min(x), max(x)]
y_range = [min(y), max(y)]
range = [min(x_range[0], y_range[0]), max(x_range[1], y_range[1])]
if commonRange:
x_range = y_range = range
if diagonal:
plt.plot(range, range, '-', lw=0.75, label="_not in legend", color=diagonalColor, zorder=2)
heatmap, _, _ = np.histogram2d(x, y, range=[x_range, y_range], bins=bins, density=False)
extent = [x_range[0], x_range[1], y_range[0], y_range[1]]
if cmap is None:
cmap = HeatMapPlot.DEFAULT_CMAP_FACTORY(len(x))
if xLabel is not None:
plt.xlabel(xLabel)
if yLabel is not None:
plt.ylabel(yLabel)
return plt.imshow(heatmap.T, extent=extent, origin='lower', interpolation="none", cmap=cmap, zorder=1, aspect="auto", **kwargs)
super().__init__(draw)
|
460419
|
from itertools import islice, count
from tqdm import tqdm
def _k_mers(sequence, k):
it = iter(sequence)
result = tuple(islice(it, k))
if len(result) == k:
yield "".join(result)
for elem in it:
result = result[1:] + (elem,)
yield "".join(result)
def transform(sequence, method="squiggle", bar=False):
"""Transforms a DNA sequence into a series of coordinates for 2D visualization.
Args:
sequence (str): The DNA sequence to transform.
method (str): The method by which to transform the sequence. Defaults to "squiggle". Valid options are ``squiggle``, ``gates``, ``yau``, ``randic`` and ``qi``.
bar (bool): Whether to display a progress bar. Defaults to false.
Returns:
tuple: A tuple containing two lists: one for the x coordinates and one for the y coordinates.
Example:
>>> transform("ATGC")
([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0], [0, 0.5, 0, -0.5, -1, -0.5, 0, -0.5, 0])
>>> transform("ATGC", method="gates")
([0, 0, 0, 1, 0], [0, -1, 0, 0, 0])
>>> transform("ATGC", method="yau")
([0, 0.5, 1.0, 1.8660254037844386, 2.732050807568877], [0, -0.8660254037844386, 0.0, -0.5, 0.0])
>>> transform("ATGC", method="yau-bp")
([0, 1, 2, 3, 4], [0, -1, 0, -0.5, 0.0])
>>> transform("ATGC", method="randic")
([0, 1, 2, 3], [3, 2, 1, 0])
>>> transform("ATGC", method="qi")
([0, 1, 2], [8, 7, 11])
Warning:
The entire sequence must be able to fit in memory.
Raises:
ValueError: When an invalid character is in the sequence.
"""
sequence = sequence.upper()
if bar:
sequence = tqdm(sequence, unit=" bases", leave=False)
if method == "squiggle":
running_value = 0
_x = count(0, step=0.5)
x, y = [next(_x) for _ in range(2 * len(sequence) + 1)], [0]
for character in sequence:
if character == "A":
y.extend([running_value + 0.5, running_value])
elif character == "C":
y.extend([running_value - 0.5, running_value])
elif character == "T":
y.extend([running_value - 0.5, running_value - 1])
running_value -= 1
elif character == "G":
y.extend([running_value + 0.5, running_value + 1])
running_value += 1
else:
y.extend([running_value] * 2)
return x, y
elif method == "gates":
x, y = [0], [0]
for character in sequence:
if character == "A":
x.append(x[-1]) # no change in x coord
y.append(y[-1] - 1)
elif character == "T":
x.append(x[-1]) # no change in x coord
y.append(y[-1] + 1)
elif character == "G":
x.append(x[-1] + 1)
y.append(y[-1]) # no change in y coord
elif character == "C":
x.append(x[-1] - 1)
y.append(y[-1]) # no change in y coord
else:
raise ValueError(
"Invalid character in sequence: "
+ character
+ ". Gates's method does not support non-ATGC bases. Try using method=squiggle."
)
elif method == "yau":
x, y = [0], [0]
for character in sequence:
if character == "A":
x.append(x[-1] + 0.5)
y.append(y[-1] - ((3 ** 0.5) / 2))
elif character == "T":
x.append(x[-1] + 0.5)
y.append(y[-1] + ((3 ** 0.5) / 2))
elif character == "G":
x.append(x[-1] + ((3 ** 0.5) / 2))
y.append(y[-1] - 0.5)
elif character == "C":
x.append(x[-1] + ((3 ** 0.5) / 2))
y.append(y[-1] + 0.5)
else:
raise ValueError(
"Invalid character in sequence: "
+ character
+ ". Yau's method does not support non-ATGC bases. Try using method=squiggle."
)
elif method == "yau-bp":
_x = count(0)
x, y = [next(_x) for _ in range(len(sequence) + 1)], [0]
for character in sequence:
if character == "A":
y.append(y[-1] - 1)
elif character == "T":
y.append(y[-1] + 1)
elif character == "G":
y.append(y[-1] - 0.5)
elif character == "C":
y.append(y[-1] + 0.5)
else:
raise ValueError(
"Invalid character in sequence: "
+ character
+ ". Yau's method does not support non-ATGC bases. Try using method=squiggle."
)
elif method == "randic":
x, y = [], []
mapping = dict(A=3, T=2, G=1, C=0)
for i, character in enumerate(sequence):
x.append(i)
try:
y.append(mapping[character])
except KeyError:
raise ValueError(
"Invalid character in sequence: "
+ character
+ ". Randić's method does not support non-ATGC bases. Try using method=squiggle."
)
elif method == "qi":
mapping = {
"AA": 12,
"AC": 4,
"GT": 6,
"AG": 0,
"CC": 13,
"CA": 5,
"CG": 10,
"TT": 15,
"GG": 14,
"GC": 11,
"AT": 8,
"GA": 1,
"TG": 7,
"TA": 9,
"TC": 3,
"CT": 2,
}
x, y = [], []
for i, k_mer in enumerate(_k_mers(sequence, 2)):
x.append(i)
try:
y.append(mapping[k_mer])
except KeyError:
raise ValueError(
"Invalid k-mer in sequence: "
+ k_mer
+ ". Qi's method does not support non-ATGC bases. Try using method=squiggle."
)
else:
raise ValueError(
"Invalid method. Valid methods are 'squiggle', 'gates', 'yau', and 'randic'."
)
if bar:
sequence.close()
return x, y
|
460445
|
import requests
import json
dataSet = []
url = 'http://universities.hipolabs.com/search?name='
def readUrl(search):
results = requests.get(url+search)
print("Status Code: ", results.status_code)
print("Headers: Content-Type: ", results.headers['Content-Type'])
# print("Headers: ", results.headers)
return results.json()
if __name__=="__main__":
jsonResult = readUrl('Wales')
# print(jsonResult)
for university in jsonResult:
name = university['name']
web_page = university['web_pages'][0]
dataSet.append([name, web_page])
print("Total Universities Found: ",len(dataSet))
print(dataSet)
'''
Status Code: 200
Headers: Content-Type: application/json
Total Universities Found: 10
[['University of Wales', 'http://www.wales.ac.uk/'],
['University of Wales Institute, Cardiff', 'http://www.uwic.ac.uk/'],
['University of Wales College of Medicine', 'http://www.uwcm.ac.uk/'],
['Johnson & Wales University', 'http://www.jwu.edu/'],
['University of New South Wales', 'http://www.unsw.edu.au/'],
['University of Wales, Newport', 'http://www.newport.ac.uk/'],
['University of Wales, Swansea', 'http://www.swan.ac.uk/'],
['University of Wales, Aberystwyth', 'http://www.aber.ac.uk/'],
['University of Wales, Lampeter', 'http://www.lamp.ac.uk/'],
['University of Wales, Bangor', 'http://www.bangor.ac.uk/']]
'''
|
460453
|
import logging
import ply.yacc as yacc
from functools import partial
from rita.lexer import RitaLexer
from rita import macros
logger = logging.getLogger(__name__)
def stub(*args, **kwargs):
return None
def either(a, b):
yield a
yield b
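# Example: list(either("cat", "dog")) -> ["cat", "dog"]; used below to expand
# PIPE-separated alternatives lazily.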
def load_macro(name, config):
try:
return partial(getattr(macros, name), config=config)
except Exception:
pass
def lazy_load(*args, **kwargs):
logger.info(config.modules)
for mod in config.modules:
try:
fn = getattr(mod, name)
return fn(*args, **kwargs)
except Exception as ex:
logger.error(ex)
continue
raise RuntimeError("MACRO {} not loaded".format(name))
return lazy_load
def var_wrapper(variable, config):
def wrapper(*args, **kwargs):
logger.debug("Variables: {}".format(config.variables))
return config.get_variable(variable)
return wrapper
class RitaParser(object):
tokens = RitaLexer.tokens
precedence = (
("nonassoc", "ARROW"),
("nonassoc", "PIPE"),
("nonassoc", "COMMA"),
("left", "EXEC"),
("left", "ASSIGN"),
("left", "RBRACKET", "LBRACKET", "LPAREN", "RPAREN"),
("left", "KEYWORD", "NAME", "LITERAL"),
("right", "MODIF_QMARK", "MODIF_STAR", "MODIF_PLUS"),
)
def __init__(self, config):
self.config = config
self.lexer = None
self.parser = None
def p_document(self, p):
"""
DOCUMENT : MACRO_CHAIN
| MACRO_EXEC
| VARIABLE
"""
logger.debug("Building initial document {}".format(p[1]))
p[0] = [p[1]]
def p_document_list(self, p):
"""
DOCUMENT : DOCUMENT MACRO_CHAIN
| DOCUMENT MACRO_EXEC
| DOCUMENT VARIABLE
"""
logger.debug("Extending document {}".format(p[2]))
p[0] = p[1] + [p[2]]
def p_macro_chain(self, p):
" MACRO_CHAIN : MACRO ARROW MACRO "
logger.debug("Have {0} -> {1}".format(p[1], p[3]))
p[0] = partial(
p[3],
macros.PATTERN(p[1], config=self.config),
config=self.config
)
def p_macro_chain_from_array(self, p):
" MACRO_CHAIN : ARRAY ARROW MACRO "
logger.debug("Have {0} -> {1}".format(p[1], p[3]))
p[0] = partial(
p[3],
macros.PATTERN(*p[1], config=self.config),
config=self.config
)
def p_macro_exec(self, p):
" MACRO_EXEC : EXEC MACRO "
logger.debug("Exec {0}".format(p[2]))
macros.EXEC(p[2], config=self.config)
p[0] = stub
def p_macro_w_modif(self, p):
"""
MACRO : MACRO MODIF_PLUS
| MACRO MODIF_STAR
| MACRO MODIF_QMARK
| MACRO EXEC
"""
logger.debug("Adding modifier to Macro {}".format(p[1]))
fn = p[1]
p[0] = partial(fn, op=p[2])
def p_macro_wo_args(self, p):
" MACRO : KEYWORD "
fn = load_macro(p[1], config=self.config)
logger.debug("Parsing macro (w/o args): {}".format(p[1]))
p[0] = fn
def p_macro_w_args(self, p):
" MACRO : KEYWORD LPAREN ARGS RPAREN "
logger.debug("Parsing macro: {0}, args: {1}".format(p[1], p[3]))
fn = load_macro(p[1], config=self.config)
p[0] = partial(fn, *p[3])
def p_macro_from_array(self, p):
" MACRO : KEYWORD ARRAY "
logger.debug("Parsing macro: {0}, args: {1}".format(p[1], p[2]))
fn = load_macro(p[1], config=self.config)
p[0] = partial(fn, *p[2])
def p_array(self, p):
" ARRAY : LBRACKET ARGS RBRACKET "
p[0] = p[2]
def p_variable(self, p):
" VARIABLE_NAME : NAME "
p[0] = var_wrapper(p[1], self.config)
def p_variable_from_args(self, p):
" VARIABLE : NAME ASSIGN ARGS "
if len(p[3]) == 1:
macros.ASSIGN(p[1], p[3][0], config=self.config)
else:
macros.ASSIGN(p[1], p[3], config=self.config)
p[0] = stub
def p_either(self, p):
" ARG : ARG PIPE ARG "
p[0] = either(p[1], p[3])
def p_arg_list(self, p):
" ARGS : ARGS COMMA ARG "
p[0] = p[1] + [p[3]]
def p_args(self, p):
" ARGS : ARG "
p[0] = [p[1]]
def p_arg(self, p):
" ARG : LITERAL "
p[0] = p[1]
def p_arg_from_macro(self, p):
" ARG : MACRO "
p[0] = p[1]
def p_arg_from_var(self, p):
" ARG : VARIABLE_NAME "
p[0] = p[1]()
def p_arg_from_array(self, p):
" ARGS : ARRAY "
p[0] = p[1]
def p_error(self, p):
if p:
logger.error("Syntax error at '{}'".format(p.value))
else:
logger.error("p is null")
def build(self, **kwargs):
self.lexer = RitaLexer().build(**kwargs)
self.parser = yacc.yacc(module=self, errorlog=logger, **kwargs)
def parse(self, data):
if data.strip() == "":
return []
return self.parser.parse(r"{}".format(data), lexer=self.lexer, debug=logger)
|
460470
|
from pathlib import Path
from whispers.utils import string_is_function, string_is_quoted, strip_string
class Javascript:
def pairs(self, filepath: Path):
for line in filepath.open("r").readlines():
if line.count("=") == 1:
yield from self.parse_assignment(line)
def parse_assignment(self, line: str):
key, value = line.split("=")
key = strip_string(key).split(" ")[-1]
value = value.replace(";", "").strip()
if string_is_quoted(value) and not string_is_function(value):
yield key, value
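# Example (sketch): a line such as
#   var apiKey = "abc123";
# would yield the pair ("apiKey", '"abc123"'), assuming whispers' strip_string
# leaves the identifier itself unchanged.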
|
460480
|
import datetime
from bitmovin import Bitmovin, Encoding, S3Input, S3Output, H264CodecConfiguration, \
AACCodecConfiguration, H264Profile, StreamInput, SelectionMode, Stream, EncodingOutput, ACLEntry, ACLPermission, \
MP4Muxing, MuxingStream, CloudRegion, SmoothManifest, MP4Representation, PlayReadyDRM, PlayReadyMethod, \
SmoothContentProtection, Condition
from bitmovin.errors import BitmovinError
API_KEY = '<YOUR_API_KEY>'
S3_INPUT_ACCESSKEY = '<YOUR_S3_INPUT_ACCESSKEY>'
S3_INPUT_SECRETKEY = '<YOUR_S3_INPUT_SECRETKEY>'
S3_INPUT_BUCKETNAME = '<YOUR_S3_INPUT_BUCKETNAME>'
S3_INPUT_PATH = '<YOUR_S3_INPUT_PATH>'
S3_OUTPUT_ACCESSKEY = '<YOUR_S3_OUTPUT_ACCESSKEY>'
S3_OUTPUT_SECRETKEY = '<YOUR_S3_OUTPUT_SECRETKEY>'
S3_OUTPUT_BUCKETNAME = '<YOUR_S3_OUTPUT_BUCKETNAME>'
PLAYREADY_KEYSEED = '<YOUR_CENC_KEY>'
PLAYREADY_KID = '<YOUR_CENC_KID>'
PLAYREADY_LA_URL = '<YOUR_PLAYREADY_LA_URL>'
date_component = str(datetime.datetime.now()).replace(' ', '_').replace(':', '-').split('.')[0].replace('_', '__')
OUTPUT_BASE_PATH = 'output/python-smooth/{}/'.format(date_component)
# Please set here the encoding profiles. You can modify height, bitrate and fps.
encoding_profiles_h264 = [
dict(height=240, bitrate=400, fps=None, profile=H264Profile.HIGH),
dict(height=360, bitrate=800, fps=None, profile=H264Profile.HIGH),
dict(height=480, bitrate=1200, fps=None, profile=H264Profile.HIGH),
dict(height=720, bitrate=2400, fps=None, profile=H264Profile.HIGH),
]
def main():
bitmovin = Bitmovin(api_key=API_KEY)
s3_input = S3Input(access_key=S3_INPUT_ACCESSKEY,
secret_key=S3_INPUT_SECRETKEY,
bucket_name=S3_INPUT_BUCKETNAME,
name='Sample S3 Input')
s3_input = bitmovin.inputs.S3.create(s3_input).resource
s3_output = S3Output(access_key=S3_OUTPUT_ACCESSKEY,
secret_key=S3_OUTPUT_SECRETKEY,
bucket_name=S3_OUTPUT_BUCKETNAME,
name='Sample S3 Output')
s3_output = bitmovin.outputs.S3.create(s3_output).resource
acl_entry = ACLEntry(permission=ACLPermission.PUBLIC_READ)
encoding = Encoding(name='example mp4 encoding for smooth + playready',
cloud_region=CloudRegion.GOOGLE_EUROPE_WEST_1)
encoding = bitmovin.encodings.Encoding.create(encoding).resource
encoding_configs = []
# Iterate over all encoding profiles and create the H264 configuration with the defined height and bitrate.
for idx, _ in enumerate(encoding_profiles_h264):
profile_h264 = encoding_profiles_h264[idx]
encoding_config = dict(profile_h264=profile_h264)
h264_codec = H264CodecConfiguration(
name='H264 Codec {}p {}k Configuration'.format(profile_h264.get('height'),
profile_h264.get('bitrate')),
bitrate=profile_h264.get('bitrate') * 1000,
height=profile_h264.get('height'),
profile=profile_h264.get('profile'),
rate=profile_h264.get("fps"))
encoding_config['h264_codec'] = bitmovin.codecConfigurations.H264.create(h264_codec).resource
encoding_configs.append(encoding_config)
audio_codec_configuration = AACCodecConfiguration(name='example_audio_codec_configuration_english',
bitrate=128000,
rate=48000)
audio_codec_configuration = bitmovin.codecConfigurations.AAC.create(audio_codec_configuration).resource
video_input_stream = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
audio_input_stream = StreamInput(input_id=s3_input.id,
input_path=S3_INPUT_PATH,
selection_mode=SelectionMode.AUTO)
# With the configurations and the input file streams are now created and muxed later on.
for encoding_config in encoding_configs:
encoding_profile = encoding_config.get("profile_h264")
video_stream_condition = Condition(attribute="HEIGHT", operator=">=", value=str(encoding_profile.get('height')))
video_stream_h264 = Stream(codec_configuration_id=encoding_config.get("h264_codec").id,
input_streams=[video_input_stream],
conditions=video_stream_condition,
name='Stream H264 {}p_{}k'.format(encoding_profile.get('height'),
encoding_profile.get('bitrate')))
encoding_config['h264_stream'] = bitmovin.encodings.Stream.create(object_=video_stream_h264,
encoding_id=encoding.id).resource
audio_stream = Stream(codec_configuration_id=audio_codec_configuration.id,
input_streams=[audio_input_stream],
name='Sample Stream AUDIO')
audio_stream = bitmovin.encodings.Stream.create(object_=audio_stream, encoding_id=encoding.id).resource
for encoding_config in encoding_configs:
encoding_profile = encoding_config.get("profile_h264")
video_muxing_stream_h264 = MuxingStream(encoding_config.get("h264_stream").id)
video_muxing_output_h264 = EncodingOutput(output_id=s3_output.id, output_path=OUTPUT_BASE_PATH, acl=[acl_entry])
video_muxing_h264 = MP4Muxing(filename='video_{}p.ismv'.format(encoding_profile.get('height')),
fragment_duration=4000,
streams=[video_muxing_stream_h264],
outputs=[video_muxing_output_h264],
name='Sample Muxing {}p'.format(encoding_profile.get('height')))
encoding_config['h264_muxing'] = bitmovin.encodings.Muxing.MP4.create(object_=video_muxing_h264,
encoding_id=encoding.id).resource
playready_drm = PlayReadyDRM(key_seed=PLAYREADY_KEYSEED,
kid=PLAYREADY_KID,
method=PlayReadyMethod.PIFF_CTR,
la_url=PLAYREADY_LA_URL,
outputs=[video_muxing_output_h264],
name="PlayReady")
encoding_config['playready_drm'] = bitmovin.encodings.Muxing.MP4.DRM.PlayReady.create(object_=playready_drm,
encoding_id=encoding.id,
muxing_id=encoding_config['h264_muxing'].id).resource
audio_muxing_stream = MuxingStream(audio_stream.id)
audio_muxing_output = EncodingOutput(output_id=s3_output.id, output_path=OUTPUT_BASE_PATH, acl=[acl_entry])
audio_muxing = MP4Muxing(filename='audio.isma',
fragment_duration=4000,
streams=[audio_muxing_stream],
outputs=[audio_muxing_output],
name='Sample Muxing AUDIO')
audio_muxing = bitmovin.encodings.Muxing.MP4.create(object_=audio_muxing, encoding_id=encoding.id).resource
playready_audio = PlayReadyDRM(key_seed=PLAYREADY_KEYSEED,
kid=PLAYREADY_KID,
method=PlayReadyMethod.PIFF_CTR,
la_url=PLAYREADY_LA_URL,
outputs=[audio_muxing_output],
name='PlayReady')
playready_audio = bitmovin.encodings.Muxing.MP4.DRM.PlayReady.create(object_=playready_audio,
encoding_id=encoding.id,
muxing_id=audio_muxing.id).resource
bitmovin.encodings.Encoding.start(encoding_id=encoding.id)
try:
bitmovin.encodings.Encoding.wait_until_finished(encoding_id=encoding.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for encoding to finish: {}".format(bitmovin_error))
manifest_output = EncodingOutput(output_id=s3_output.id,
output_path=OUTPUT_BASE_PATH,
acl=[acl_entry])
smooth_manifest = SmoothManifest(server_manifest_name='example_manifest_smooth.ism',
client_manifest_name='example_manifest_smooth.ismc',
outputs=[manifest_output],
name='Sample SmoothStreaming Manifest')
smooth_manifest = bitmovin.manifests.Smooth.create(object_=smooth_manifest).resource
for encoding_config in encoding_configs:
encoding_profile = encoding_config.get("profile_h264")
muxing = encoding_config.get('h264_muxing')
mp4_representation = MP4Representation(encoding_id=encoding.id,
muxing_id=muxing.id,
media_file='video_{}p.ismv'.format(encoding_profile.get('height')))
encoding_config['h264_smooth'] = bitmovin.manifests.Smooth.MP4Representation.create(manifest_id=smooth_manifest.id,
object_=mp4_representation)
mp4_representation_audio = MP4Representation(encoding_id=encoding.id,
muxing_id=audio_muxing.id,
media_file='audio.isma')
bitmovin.manifests.Smooth.MP4Representation.create(manifest_id=smooth_manifest.id, object_=mp4_representation_audio)
content_protection = SmoothContentProtection(encoding_id=encoding.id,
muxing_id=audio_muxing.id,
drm_id=playready_audio.id)
bitmovin.manifests.Smooth.ContentProtection.create(object_=content_protection, manifest_id=smooth_manifest.id)
bitmovin.manifests.Smooth.start(manifest_id=smooth_manifest.id)
try:
bitmovin.manifests.Smooth.wait_until_finished(manifest_id=smooth_manifest.id)
except BitmovinError as bitmovin_error:
print("Exception occurred while waiting for Smooth manifest creation to finish: {}".format(bitmovin_error))
if __name__ == '__main__':
main()
|
460483
|
import os
import shutil
from time import time
from datetime import datetime
class Utils(object):
''' Static Helpers.'''
@staticmethod
def rm_tree(path):
'''Recursively removes the directory tree at path.'''
if os.path.isdir(path):
if os.name == 'nt':
#Hack for Windows. Shutil can't remove files with a path longer than 260.
cmd = "rd {0} /s /q".format(path)
os.system(cmd)
else:
shutil.rmtree(path)
@staticmethod
def get_groupped_classes(tree):
# Group classes by relative path to generate index.html.
groups = []
classes = []
key = tree.classes[0].folder
classes.append(tree.classes[0])
for i in range(1, len(tree.classes)):
if tree.classes[i].folder == key:
classes.append(tree.classes[i])
else:
groups.append(classes)
key = tree.classes[i].folder
classes = [tree.classes[i]]
groups.append(classes)
return groups
@staticmethod
def get_package_name(smali_class_name):
'''Returns the package name of the given smali class name.
Format: org/android/rock.'''
package_name, f = os.path.split(smali_class_name)
return package_name[1:]
@staticmethod
def get_standart_package_name(smali_class_name):
'''Returns the package name of the given smali class name.
Format: org.android.rock.'''
return Utils.get_package_name(smali_class_name).replace('/', '.')
@staticmethod
def is_in_ranges(i, ranges):
'''Returns True if index i falls inside one of the index intervals in ranges.'''
for r in ranges:
if r[0] <= i < r[1]:
return True
return False
@staticmethod
def scan_synchronized_tries(method):
'''Returns a list of index intervals where the insns must be throw-safe.
Otherwise, VerifyChecker recognizes the code as invalid.'''
ranges = []
for tr in method.tries:
if tr.handler.name.startswith("catchall"):
start = tr.start.index
end = tr.end.index
if tr.handler.index < tr.end.index and tr.handler.index >= tr.start.index:
end = tr.handler.index
for i in range(start, end):
if method.insns[i].opcode_name.startswith("monitor-exit"):
ranges.append([i, end])
break
return ranges
@staticmethod
def copytree(src, dst, symlinks=False, ignore=None):
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
Utils.copytree(s, d, symlinks, ignore)
else:
if not os.path.exists(d) or os.stat(s).st_mtime - os.stat(d).st_mtime > 1:
shutil.copy2(s, d)
@staticmethod
def log_entry(log_path, entry, sep=','):
if not os.path.exists(log_path):
with open(log_path, 'w') as log_file:
log_file.write("sep={}\n".format(sep))
log_file.flush()
with open(log_path, 'a+') as log_file:
log_file.write(entry)
log_file.flush()
def timeit(method):
'''Measures the working time of the method.'''
def wrapper(*args, **kwargs):
start = time()
result = method(*args, **kwargs)
end = time()
time_log_path = os.path.join("times_log.csv")
args_str = ";".join(map(str,args))
entry = "{0};{1};{2};{3}\n".format(datetime.now(), end - start, method.__name__.lower(), args_str)
Utils.log_entry(time_log_path, entry, sep=";")
return result
return wrapper
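# Example usage (sketch; the decorated function name below is hypothetical):
# any function decorated with @timeit gets a row appended to times_log.csv with
# timestamp, duration, method name and arguments.
# @timeit
# def decompile(apk_path):
#     ...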
|
460493
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('dabangcrawler.urls', namespace='dabangcrawler')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
460515
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.analysis_timeseries_queries as esda
def get_last_place_entry(key, user_id):
"""
There are many ways to find the last place. One would be to find the one
with the max enter_ts. But that is not performant because we would need to
retrieve all the enter_ts and find their max, which is expensive. Instead, we
use the property that we process data in chunks of trips, so the last place
would have been created and entered but not exited.
:param key: the metadata key of the place entries
:param user_id: the id of the user whose timeline is being queried
"""
ts = esta.TimeSeries.get_time_series(user_id)
ret_place_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
'metadata.key': key,
'data.exit_ts' : {'$exists': False}})
logging.debug("last place doc = %s" % ret_place_doc)
if ret_place_doc is None:
return None
ret_place = ecwe.Entry(ret_place_doc)
assert('exit_ts' not in ret_place.data)
assert('exit_fmt_time' not in ret_place.data)
assert('starting_trip' not in ret_place.data)
return ret_place
def get_first_place_entry(key, user_id):
"""
Similar to get_last_place_entry, only finding one with only an exit_ts
and no enter_ts.
"""
ts = esta.TimeSeries.get_time_series(user_id)
ret_place_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
'metadata.key': key,
'data.enter_ts' : {'$exists': False}})
logging.debug("first place doc = %s" % ret_place_doc)
if ret_place_doc is None:
return None
ret_place = ecwe.Entry(ret_place_doc)
assert('enter_ts' not in ret_place.data)
assert('enter_fmt_time' not in ret_place.data)
assert('ending_trip' not in ret_place.data)
return ret_place
def get_last_place_before(place_key, reset_ts, user_id):
"""
Unlike `get_last_place_entry`, which returns the last place in the
timeline, this returns the last place before a particular timestamp.
Used to reset the pipeline, for example.
To implement this, we can't just look for places before that timestamp,
because then we will get a list. And we don't want to retrieve all of them
and sort either.
We can look for places that exit after that timestamp, but that will also
give a list. But hopefully, a shorter list, so that we don't have to sort
as much. I can't think of an alternative that doesn't require sorting.
Oh wait! There is an alternative!
We can look for the place that has an enter timestamp before the ts and an
exit timestamp after, or a trip that has a start timestamp before the ts
and an end timestamp after. We should only find one. And if we find the
trip then the place is its start place.
Note that these correspond to the two use cases in
https://github.com/e-mission/e-mission-server/issues/333
"""
trip_key_query = _get_trip_key_query(place_key)
logging.debug("Looking for last place before %s" % reset_ts)
ts = esta.TimeSeries.get_time_series(user_id)
all_user_places = list(edb.get_analysis_timeseries_db().find(
{"user_id": user_id, "metadata.key": place_key},
{"_id": True, "data.enter_fmt_time": True, "data.exit_fmt_time": True}))
logging.debug("all places for this user = %s" % all_user_places)
ret_place_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
'metadata.key': place_key,
'data.exit_ts' : {'$gt': reset_ts},
'data.enter_ts': {'$lt': reset_ts}
})
logging.debug("last place doc for user %s = %s" % (user_id, ret_place_doc))
ret_trip_doc = ts.analysis_timeseries_db.find_one({'user_id': user_id,
'metadata.key': trip_key_query,
'data.end_ts' : {'$gt': reset_ts},
'data.start_ts': {'$lt': reset_ts}
})
logging.debug("last trip doc for user %s = %s" % (user_id, ret_trip_doc))
if ret_place_doc is None and ret_trip_doc is None:
# Check to see if the pipeline ended before this
last_place = get_last_place_entry(place_key, user_id)
logging.debug("last_place = %s, reset_ts = %s" %
(last_place, reset_ts))
if last_place is None:
return None
elif last_place.data.enter_ts is None:
return None
elif last_place.data.enter_ts < reset_ts:
return last_place
else:
raise ValueError("No trip or place straddling time %s for user %s" %
(reset_ts, user_id))
if ret_place_doc is None:
assert ret_trip_doc is not None
logging.info("ret_trip_doc start = %s, end = %s" %
(ret_trip_doc["data"]["start_fmt_time"],
ret_trip_doc["data"]["end_fmt_time"]))
ret_place_doc = esda.get_entry(place_key, ret_trip_doc["data"]['start_place'])
assert ret_place_doc is not None
ret_place = ecwe.Entry(ret_place_doc)
return ret_place
def _get_trip_key_query(place_key):
if place_key == esda.CLEANED_PLACE_KEY:
return {"$in": [esda.CLEANED_TRIP_KEY, esda.CLEANED_UNTRACKED_KEY]}
elif place_key == esda.RAW_PLACE_KEY:
return {"$in": [esda.RAW_TRIP_KEY, esda.RAW_UNTRACKED_KEY]}
else:
raise RuntimeError("Invalid place key %s" % place_key)
|
460540
|
from HelperfunctionsNew import *
import sys
import os
if __name__ == "__main__":
blockDesc = sys.argv[1]
helper = Helper()
if blockDesc in ["Skit Name", "Synopsis", "Minigame"]:
helper.createBlock_Multi(blockDesc)
elif blockDesc != "All":
print("Create the script based on google sheet")
helper.createAtlasScript_Block(blockDesc)
print("Create the SLPS for this block")
helper.reinsertText_Block(blockDesc)
else:
helper.createAtlasScript_All()
print("Create the SLPS for this block")
helper.reinsertText_All(blockDesc)
|
460559
|
import pytest
@pytest.fixture(scope='session')
def django_db_setup():
from django.conf import settings
settings.DATABASES['default'] = {
"ENGINE": 'django.db.backends.postgresql',
"NAME": 'escalate',
"USER": 'escalate',
"PASSWORD": '<PASSWORD>',
"HOST": 'localhost',
"PORT": 5432,
'OPTIONS': {
'options': '-c search_path=dev'
}
}
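# Note: pytest-django looks up a fixture named django_db_setup; overriding it at
# session scope like this points tests at the existing 'escalate' database
# (with search_path=dev) instead of creating a throwaway test database.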
|
460570
|
from dexy.filter import DexyFilter
from dexy.utils import parse_yaml
import re
class YamlargsFilter(DexyFilter):
"""
Specify attributes in YAML at top of file.
"""
aliases = ['yamlargs']
def process_text(self, input_text):
regex = "\r?\n---\r?\n"
if re.search(regex, input_text):
self.log_debug("Found yaml content.")
raw_yamlargs, content = re.split(regex, input_text)
yamlargs = parse_yaml(raw_yamlargs)
self.log_debug("Adding yaml: %s" % yamlargs)
self.add_runtime_args(yamlargs)
return content
else:
self.log_debug("No yaml content found.")
return input_text
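# Example (sketch): an input document such as
#   title: My Document
#   author: someone
#   ---
#   Actual body text...
# would have {'title': 'My Document', 'author': 'someone'} added as runtime args,
# and process_text() would return only the body below the --- separator.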
|
460576
|
from ..value_set import ValueSet
class AcuteInpatient(ValueSet):
"""
**Clinical Focus:** This value set contains concepts related to acute inpatient visits.
**Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an acute inpatient setting. This is a grouping value set of CPT and SNOMED codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1083'
VALUE_SET_NAME = 'Acute Inpatient'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99221',
'99222',
'99223',
'99231',
'99232',
'99233',
'99238',
'99239',
'99251',
'99252',
'99253',
'99254',
'99255',
'99291'
}
SNOMEDCT = {
'112689000',
'1505002',
'15584006',
'183450002',
'183481006',
'183487005',
'183488000',
'183489008',
'183491000',
'183492007',
'183493002',
'183494008',
'183495009',
'183496005',
'183497001',
'183498006',
'183499003',
'183500007',
'183501006',
'183502004',
'183503009',
'183504003',
'183505002',
'183506001',
'183507005',
'183508000',
'183509008',
'183510003',
'183511004',
'183512006',
'235313004',
'25986004',
'287927002',
'304566005',
'305337004',
'305338009',
'305341000',
'305342007',
'305350003',
'305354007',
'305355008',
'305356009',
'305357000',
'305358005',
'305359002',
'305360007',
'305361006',
'305362004',
'305363009',
'305364003',
'305365002',
'305366001',
'305367005',
'305368000',
'305369008',
'305370009',
'305371008',
'305372001',
'305374000',
'305375004',
'305376003',
'305377007',
'305378002',
'305379005',
'305380008',
'305382000',
'305383005',
'305384004',
'305385003',
'305386002',
'305387006',
'305388001',
'305389009',
'305390000',
'305391001',
'305392008',
'305393003',
'305394009',
'305395005',
'305396006',
'305397002',
'305399004',
'305400006',
'305401005',
'305402003',
'305403008',
'305404002',
'305405001',
'305406000',
'305407009',
'305408004',
'305409007',
'305410002',
'305411003',
'305412005',
'305413000',
'305414006',
'305415007',
'305416008',
'305417004',
'305418009',
'305419001',
'305420007',
'305421006',
'305422004',
'305423009',
'305424003',
'305425002',
'305426001',
'305427005',
'305428000',
'305429008',
'305430003',
'305431004',
'305432006',
'305433001',
'305434007',
'305435008',
'306732000',
'306803007',
'306967009',
'308251003',
'308252005',
'308253000',
'310361003',
'3241000175106',
'32485007',
'373113001',
'397769005',
'398162007',
'405614004',
'417005',
'432621000124105',
'442281000124108',
'447941000124106',
'448421000124105',
'448431000124108',
'448441000124103',
'448851000124103',
'4563007',
'45702004',
'47348005',
'48183000',
'51032003',
'51501005',
'5161006',
'52748007',
'60059000',
'63551005',
'699124006',
'70755000',
'71290004',
'76193006',
'76464004',
'81672003',
'82942009',
'8715000'
}
class CareServicesInLongTermResidentialFacility(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients living in assisted living, domiciliary care or rest homes who have had an interaction with a member of their medical team.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with services provided to new and established patients living in assisted living, domiciliary care or rest home. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes visits in settings other than assisted living, domiciliary care or rest homes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1014'
VALUE_SET_NAME = 'Care Services in Long-Term Residential Facility'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99324',
'99325',
'99326',
'99327',
'99328',
'99334',
'99335',
'99336',
'99337'
}
SNOMEDCT = {
'209099002',
'210098006'
}
class ClinicalOralEvaluation(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who had a clinical oral evaluation.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with periodic, limited (problem focused), and comprehensive oral evaluations.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.125.12.1003'
VALUE_SET_NAME = 'Clinical Oral Evaluation'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CDT = {
'D0120',
'D0140',
'D0145',
'D0150',
'D0160',
'D0170',
'D0180'
}
class ContactOrOfficeVisit(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent contact and office visits for new and established patients, and includes in-person, telephone, online, and other visit types related to depression encounters.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with outpatient contact and office visits in which a patient may be evaluated for depression. This groups CPT and HCPCS codes.
**Exclusion Criteria:** Excludes inpatients for purposes of the index event. The majority of CPT codes are specified for outpatient visit types; however psychiatry and psychotherapy visits can be used in the inpatient setting.
"""
OID = '2.16.840.1.113762.1.4.1080.5'
VALUE_SET_NAME = 'Contact or Office Visit'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'90791',
'90792',
'90832',
'90834',
'90837',
'99201',
'99202',
'99203',
'99204',
'99205',
'99211',
'99212',
'99213',
'99214',
'99215',
'99421',
'99422',
'99423',
'99441',
'99442',
'99443',
'99444'
}
HCPCSLEVELII = {
'G0402',
'G0438',
'G0439',
'G2061',
'G2062',
'G2063'
}
class DetoxificationVisit(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent detoxification visits.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying alcohol and drug detoxification. This is a grouping of SNOMED CT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1059'
VALUE_SET_NAME = 'Detoxification Visit'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
SNOMEDCT = {
'182969009',
'20093000',
'23915005',
'414054004',
'414056002',
'56876005',
'61480009',
'64297001',
'67516001',
'87106005'
}
class DischargeServicesHospitalInpatient(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent inpatient hospital discharge services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying hospital discharge day management. This is a grouping of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1007'
VALUE_SET_NAME = 'Discharge Services - Hospital Inpatient'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99238',
'99239'
}
class DischargeServicesHospitalInpatientSameDayDischarge(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent inpatient hospital same day discharge services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying observation or inpatient care for the evaluation and management of a patient that results in discharge on the same date of admission. This is a grouping of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1006'
VALUE_SET_NAME = 'Discharge Services - Hospital Inpatient Same Day Discharge'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99234',
'99235',
'99236'
}
class DischargeServicesNursingFacility(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have been discharged from a nursing facility.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with discharge from a nursing facility, including a final examination, instructions for continuing care and preparation of discharge records, prescriptions, and referral forms. Discharge services encounters can be less than or over 30 minutes. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes discharges from settings other than a nursing facility.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1013'
VALUE_SET_NAME = 'Discharge Services - Nursing Facility'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99315',
'99316'
}
class DischargeServicesNursingFacility_1065(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have been discharged from a nursing facility.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with discharge from a nursing facility, including a final examination, instructions for continuing care and preparation of discharge records, prescriptions, and referral forms. Discharge services encounters can be less than or over 30 minutes. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes discharges from settings other than a nursing facility.
"""
OID = '2.16.840.1.113883.3.464.1003.101.11.1065'
VALUE_SET_NAME = 'Discharge Services - Nursing Facility'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99315',
'99316'
}
class Ed(ValueSet):
"""
**Clinical Focus:** This value set contains concepts related to an ED visit.
**Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an ED. This is a grouping value set of CPT and SNOMED codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1085'
VALUE_SET_NAME = 'ED'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99281',
'99282',
'99283',
'99284',
'99285'
}
SNOMEDCT = {
'4525004'
}
class EmergencyDepartmentVisit(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have had an interaction with a member of their medical care team in the emergency department.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with care provided to new and established patients in the emergency department. This is a value set grouping that includes CPT and SNOMED CT codes.
**Exclusion Criteria:** Excludes services not performed in the emergency department, including critical care and observation services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1010'
VALUE_SET_NAME = 'Emergency Department Visit'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99281',
'99282',
'99283',
'99284',
'99285'
}
SNOMEDCT = {
'4525004'
}
class FrailtyEncounter(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent nursing care services provided to frail patients.
**Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with nursing care and home care services provided to frail patients. This is a grouping of CPT, HCPCS, and SNOMEDCT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1088'
VALUE_SET_NAME = 'Frailty Encounter'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99504',
'99509'
}
HCPCSLEVELII = {
'G0162',
'G0299',
'G0300',
'G0493',
'G0494',
'S0271',
'S0311',
'S9123',
'S9124',
'T1000',
'T1001',
'T1002',
'T1003',
'T1004',
'T1005',
'T1019',
'T1020',
'T1021',
'T1022',
'T1030',
'T1031'
}
SNOMEDCT = {
'413467001'
}
class HomeHealthcareServices(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have had a home health visit by a provider for the evaluation or management of a new or existing patient.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with home visits for the evaluation and management of a new or established patient. This is a grouping value set of CPT and SNOMED CT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1016'
VALUE_SET_NAME = 'Home Healthcare Services'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99341',
'99342',
'99343',
'99344',
'99345',
'99347',
'99348',
'99349',
'99350'
}
SNOMEDCT = {
'185460008',
'185462000',
'185466002',
'185467006',
'185468001',
'185470005',
'225929007',
'315205008',
'439708006',
'698704008',
'704126008'
}
class HospitalInpatientVisitInitial(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent inpatient hospital visits.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with initial hospital care for the evaluation and management of a patient. This is a grouping of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1004'
VALUE_SET_NAME = 'Hospital Inpatient Visit - Initial'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99221',
'99222',
'99223'
}
class HospitalObservationCareInitial(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent initial inpatient hospital observation care.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with initial observation care for the evaluation and management of a patient. This is a grouping of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1002'
VALUE_SET_NAME = 'Hospital Observation Care - Initial'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99218',
'99219',
'99220'
}
class MedicalDisabilityExam(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent work related or medical disability examinations.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with work related or medical disability examinations. This is a grouping of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.11.1233'
VALUE_SET_NAME = 'Medical Disability Exam'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99455',
'99456'
}
class NonacuteInpatient(ValueSet):
"""
**Clinical Focus:** This value set contains concepts related to nonacute inpatient visits.
**Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in a nonacute inpatient setting. This is a grouping value set of CPT and SNOMED codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1084'
VALUE_SET_NAME = 'Nonacute Inpatient'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99304',
'99305',
'99306',
'99307',
'99308',
'99309',
'99310',
'99315',
'99316',
'99318',
'99324',
'99325',
'99326',
'99327',
'99328',
'99334',
'99335',
'99336',
'99337'
}
SNOMEDCT = {
'112690009',
'183430001',
'183921001',
'304567001',
'304568006',
'305336008',
'305340004',
'305381007',
'306804001',
'36723004',
'449411000124106',
'449421000124103',
'449431000124100'
}
class NursingFacilityVisit(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have had an interaction with a member of their medical team on admission to a nursing facility.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with services provided to new and established patients in a nursing facility (skilled, intermediate and long-term care facilities).
**Exclusion Criteria:** Excludes visits in settings other than a nursing facility.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1012'
VALUE_SET_NAME = 'Nursing Facility Visit'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99304',
'99305',
'99306',
'99307',
'99308',
'99309',
'99310',
'99315',
'99316',
'99318'
}
SNOMEDCT = {
'18170008',
'207195004'
}
class Observation(ValueSet):
"""
**Clinical Focus:** This value set contains concepts related to observation visits.
**Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an observation care setting. This is a grouping value set of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1086'
VALUE_SET_NAME = 'Observation'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99217',
'99218',
'99219',
'99220'
}
class OfficeVisit(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have had an office or other outpatient visit.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an office or outpatient facility. Patient can be presenting with problems that are minor to high severity. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes non-office visits, including telehealth services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1001'
VALUE_SET_NAME = 'Office Visit'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99201',
'99202',
'99203',
'99204',
'99205',
'99212',
'99213',
'99214',
'99215'
}
SNOMEDCT = {
'185463005',
'185464004',
'185465003',
'30346009',
'3391000175108',
'37894004',
'439740005'
}
class Outpatient(ValueSet):
"""
**Clinical Focus:** This value set contains concepts related to outpatient visits.
**Data Element Scope:** This value set may use Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an outpatient setting. This is a grouping value set of CPT and HCPCS codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1087'
VALUE_SET_NAME = 'Outpatient'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99201',
'99202',
'99203',
'99204',
'99205',
'99211',
'99212',
'99213',
'99214',
'99215',
'99241',
'99242',
'99243',
'99244',
'99245',
'99341',
'99342',
'99343',
'99344',
'99345',
'99347',
'99348',
'99349',
'99350',
'99381',
'99382',
'99383',
'99384',
'99385',
'99386',
'99387',
'99391',
'99392',
'99393',
'99394',
'99395',
'99396',
'99397',
'99401',
'99402',
'99403',
'99404',
'99411',
'99412',
'99429',
'99455',
'99456',
'99483'
}
HCPCSLEVELII = {
'G0402',
'G0438',
'G0439',
'G0463',
'T1015'
}
class OutpatientConsultation(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have had an outpatient interaction at an office with a member of their medical care team.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with comprehensive history, evaluation, and management of a patient in an office or outpatient facility. Patient can be presenting with problems that are minor to high severity. This is a grouping value set of CPT and SNOMED CT codes.
**Exclusion Criteria:** Excludes non-office visits, including telehealth services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1008'
VALUE_SET_NAME = 'Outpatient Consultation'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99241',
'99242',
'99243',
'99244',
'99245'
}
SNOMEDCT = {
'281036007',
'77406008'
}
class PreventiveCareEstablishedOfficeVisit0To17(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, to be associated with patients 0-17 years of age, that received prior outpatient professional services from the physician practice in the last 3 years.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with ages 0-17, and indicating initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, for a patient that received prior outpatient professional services from the physician practice in the last 3 years. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes codes that are not for comprehensive preventive medical evaluations and codes that are for patients who have not been seen in the last 3 years.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1024'
VALUE_SET_NAME = 'Preventive Care, Established Office Visit, 0 to 17'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99391',
'99392',
'99393',
'99394'
}
class PreventiveCareServicesEstablishedOfficeVisit18AndUp(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients over the age of 18 who have had an established preventive care office visit.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive preventive medicine reevaluation and management of an individual aged 18 years or over. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes non-office visits, including telehealth services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1025'
VALUE_SET_NAME = 'Preventive Care Services - Established Office Visit, 18 and Up'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99395',
'99396',
'99397'
}
class PreventiveCareServicesGroupCounseling(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent group counseling services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying preventive medicine counseling and/or risk factor reduction intervention(s) provided to individuals in a group setting.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1027'
VALUE_SET_NAME = 'Preventive Care Services - Group Counseling'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99411',
'99412'
}
class PreventiveCareServicesIndividualCounseling(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients who have received preventive medicine counseling and/or risk factor reduction interventions.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with counseling, anticipatory guidance, and risk factor reduction interventions. Preventative care and individual counseling encounters can be 15 to 60 minutes. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes codes for services performed in the emergency department, including critical care and observation services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1026'
VALUE_SET_NAME = 'Preventive Care Services-Individual Counseling'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99401',
'99402',
'99403',
'99404'
}
class PreventiveCareServicesInitialOfficeVisit0To17(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, to be associated with patients 0-17 years of age, that have no prior outpatient professional services from the physician practice in the last 3 years.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with ages 0-17, and that indicate initial comprehensive preventive medical evaluation, including regular preventive care or care of small problem or preexisting condition that requires no extra work, for a patient that has no prior outpatient professional services from the physician practice in the last 3 years. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes codes that are not for comprehensive preventive medical evaluations and codes that are for patients who have been seen in the last 3 years.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1022'
VALUE_SET_NAME = 'Preventive Care Services, Initial Office Visit, 0 to 17'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99381',
'99382',
'99383',
'99384'
}
class PreventiveCareServicesInitialOfficeVisit18AndUp(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent patients over the age of 18 who have had an initial preventive care office visit.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
    **Inclusion Criteria:** Includes only relevant concepts associated with comprehensive preventive medicine reevaluation and management of an individual aged 18 years or over. This is a grouping value set of CPT codes.
**Exclusion Criteria:** Excludes non-office visits, including telehealth services.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1023'
VALUE_SET_NAME = 'Preventive Care Services-Initial Office Visit, 18 and Up'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99385',
'99386',
'99387'
}
class PreventiveCareServicesOther(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent unlisted preventive medicine services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
    **Inclusion Criteria:** Includes only relevant concepts associated with unlisted preventive medicine services. This is a grouping of CPT codes.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1030'
VALUE_SET_NAME = 'Preventive Care Services - Other'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99429'
}
class PsychotherapyAndPharmacologicManagement(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent psychotherapy services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying psychotherapy services.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1055'
VALUE_SET_NAME = 'Psychotherapy and Pharmacologic Management'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'90845',
'90847',
'90849',
'90853',
'90875',
'90876'
}
class TelehealthServices(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telehealth services.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying telehealth services, including telephone and online evaluation and management services.
**Exclusion Criteria:** No exclusions.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1031'
VALUE_SET_NAME = 'Telehealth Services'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'98966',
'98967',
'98968',
'99441',
'99442',
'99443'
}
class TelephoneEvaluation(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telephone evaluations.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying evaluation and management services to a patient by telephone. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes telephone evaluation and management services that last for less than five minutes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1082'
VALUE_SET_NAME = 'Telephone Evaluation'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'99441',
'99442',
'99443'
}
class TelephoneManagement(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telephone management.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with all relevant codes used to identify assessment and management services to a patient by telephone. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes telephone assessment and management services that last for less than five minutes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1053'
VALUE_SET_NAME = 'Telephone Management'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'98966',
'98967',
'98968'
}
class TelephoneVisits(ValueSet):
"""
**Clinical Focus:** This value set contains concepts that represent telephone visits.
**Data Element Scope:** This value set may use the Quality Data Model (QDM) datatype related to Encounter, Performed.
**Inclusion Criteria:** Includes only relevant concepts associated with identifying assessment, evaluation and management services to a patient by telephone. This is a grouping of CPT codes.
**Exclusion Criteria:** Excludes telephone assessment, evaluation and management services that last for less than five minutes.
"""
OID = '2.16.840.1.113883.3.464.1003.101.12.1080'
VALUE_SET_NAME = 'Telephone Visits'
EXPANSION_VERSION = 'eCQM Update 2020-05-07'
CPT = {
'98966',
'98967',
'98968',
'99441',
'99442',
'99443'
}
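# Illustrative usage sketch (an addition for clarity, not part of the original
# module): each value set above exposes its codes as plain Python sets, so a
# billing code can be classified with a simple membership test. The helper name
# below is hypothetical.
def cpt_code_in_value_set(cpt_code, value_set):
    """Return True if the given CPT code belongs to the value set's CPT grouping."""
    return cpt_code in getattr(value_set, 'CPT', set())
# Example: cpt_code_in_value_set('99441', TelephoneVisits) is True,
# while cpt_code_in_value_set('99213', TelephoneVisits) is False.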
460605
from __future__ import division
from galpy.potential import SpiralArmsPotential as spiral
import numpy as np
from numpy import pi
from numpy.testing import assert_allclose
from scipy.misc import derivative as deriv
import unittest
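# Illustrative sketch (an addition, not used by the tests below): every check in
# TestSpiralArmsPotential follows the same pattern -- an analytic force or second
# derivative returned by SpiralArmsPotential is compared against a numerical
# finite difference of the potential, relying only on the module-level imports
# above and on galpy's convention that calling a potential instance evaluates
# Phi(R, z, phi, t). For example, Rforce should agree with -dPhi/dR:
def numerical_Rforce(pot, R, z=0., phi=0., t=0., dx=1e-8):
    """Central finite-difference estimate of -dPhi/dR for comparison with pot.Rforce."""
    return -deriv(lambda x: pot(x, z, phi, t), R, dx=dx)
# e.g. pot = spiral(); assert_allclose(pot.Rforce(1., 0.), numerical_Rforce(pot, 1.), rtol=1e-5)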
class TestSpiralArmsPotential(unittest.TestCase):
def test_constructor(self):
"""Test that constructor initializes and converts units correctly."""
sp = spiral() # default values
assert sp._amp == 1
assert sp._N == -2 # trick to change to left handed coordinate system
assert sp._alpha == -0.2
assert sp._r_ref == 1
assert sp._phi_ref == 0
assert sp._Rs == 0.3
assert sp._H == 0.125
assert sp._Cs == [1]
assert sp._omega == 0
assert sp._rho0 == 1 / (4 * pi)
assert sp.isNonAxi == True
assert sp.hasC == True
assert sp.hasC_dxdv == True
assert sp._ro == 8
assert sp._vo == 220
def test_Rforce(self):
"""Tests Rforce against a numerical derivative -d(Potential) / dR."""
dx = 1e-8
rtol = 1e-5 # relative tolerance
pot = spiral()
assert_allclose(pot.Rforce(1., 0.), -deriv(lambda x: pot(x, 0.), 1., dx=dx), rtol=rtol)
R, z, t = 0.3, 0, 0
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2.2, t), -deriv(lambda x: pot(x, z, pi/2.2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3.7*pi/2, t), -deriv(lambda x: pot(x, z, 3.7*pi/2, t), R, dx=dx), rtol=rtol)
R, z, t = 1, -.7, 3
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2, t), -deriv(lambda x: pot(x, z, pi/2, t), R, dx=dx), rtol=rtol)
        assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3.3*pi/2, t), -deriv(lambda x: pot(x, z, 3.3*pi/2, t), R, dx=dx), rtol=rtol)
R, z = 3.14, .7
assert_allclose(pot.Rforce(R, z, 0), -deriv(lambda x: pot(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi / 2), -deriv(lambda x: pot(x, z, pi / 2), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi), -deriv(lambda x: pot(x, z, pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2), -deriv(lambda x: pot(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
pot = spiral(amp=13, N=7, alpha=-0.3, r_ref=0.5, phi_ref=0.3, Rs=0.7, H=0.7, Cs=[1, 2, 3], omega=3)
assert_allclose(pot.Rforce(1., 0.), -deriv(lambda x: pot(x, 0.), 1., dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(0.01, 0.), -deriv(lambda x: pot(x, 0.), 0.01, dx=dx), rtol=rtol)
R, z, t = 0.3, 0, 1.123
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2, t), -deriv(lambda x: pot(x, z, pi/2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(x, z, 3*pi/2, t), R, dx=dx), rtol=rtol)
R, z, t = 1, -.7, 121
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi / 2, t), -deriv(lambda x: pot(x, z, pi / 2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(x, z, 3*pi/2, t), R, dx=dx), rtol=rtol)
R, z, t = 3.14, .7, 0.123
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2, t), -deriv(lambda x: pot(x, z, pi / 2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(x, z, 3*pi/2, t), R, dx=dx), rtol=rtol)
pot = spiral(amp=13, N=1, alpha=0.01, r_ref=1.12, phi_ref=0, Cs=[1, 1.5, 8.], omega=-3)
assert_allclose(pot.Rforce(1., 0.), -deriv(lambda x: pot(x, 0.), 1., dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(0.1, 0.), -deriv(lambda x: pot(x, 0.), 0.1, dx=dx), rtol=rtol)
R, z, t = 0.3, 0, -4.5
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2, t), -deriv(lambda x: pot(x, z, pi/2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(x, z, 3*pi/2, t), R, dx=dx), rtol=rtol)
R, z, t = 1, -.7, -123
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi / 2, t), -deriv(lambda x: pot(x, z, pi / 2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(x, z, 3*pi/2, t), R, dx=dx), rtol=rtol)
R, z, t = 3.14, .7, -123.123
assert_allclose(pot.Rforce(R, z, 0, t), -deriv(lambda x: pot(x, z, 0, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2, t), -deriv(lambda x: pot(x, z, pi/2, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi, t), -deriv(lambda x: pot(x, z, pi, t), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(x, z, 3*pi/2, t), R, dx=dx), rtol=rtol)
pot = spiral(N=10, r_ref=15, phi_ref=5, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)])
assert_allclose(pot.Rforce(1., 0.), -deriv(lambda x: pot(x, 0.), 1., dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(0.01, 0.), -deriv(lambda x: pot(x, 0.), 0.01, dx=dx), rtol=rtol)
R, z = 0.3, 0
assert_allclose(pot.Rforce(R, z, 0), -deriv(lambda x: pot(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi/2.1), -deriv(lambda x: pot(x, z, pi/2.1), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 1.3*pi), -deriv(lambda x: pot(x, z, 1.3*pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3*pi/2), -deriv(lambda x: pot(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
R, z = 1, -.7
assert_allclose(pot.Rforce(R, z, 0), -deriv(lambda x: pot(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi / 2), -deriv(lambda x: pot(x, z, pi / 2), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, .9*pi), -deriv(lambda x: pot(x, z, .9*pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3.3*pi/2), -deriv(lambda x: pot(x, z, 3.3*pi/2), R, dx=dx), rtol=rtol)
R, z = 3.14, .7
assert_allclose(pot.Rforce(R, z, 0), -deriv(lambda x: pot(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, pi / 2.3), -deriv(lambda x: pot(x, z, pi / 2.3), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 1.1*pi), -deriv(lambda x: pot(x, z, 1.1*pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.Rforce(R, z, 3.5*pi/2), -deriv(lambda x: pot(x, z, 3.5*pi/2), R, dx=dx), rtol=rtol)
def test_zforce(self):
"""Test zforce against a numerical derivative -d(Potential) / dz"""
dx = 1e-8
rtol = 1e-6 # relative tolerance
pot = spiral()
# zforce is zero in the plane of the galaxy
assert_allclose(0, pot.zforce(0.3, 0, 0), rtol=rtol)
assert_allclose(0, pot.zforce(0.3, 0, pi/2), rtol=rtol)
assert_allclose(0, pot.zforce(0.3, 0, pi), rtol=rtol)
assert_allclose(0, pot.zforce(0.3, 0, 3*pi/2), rtol=rtol)
# test zforce against -dPhi/dz
R, z = 1, -.7
assert_allclose(pot.zforce(R, z, 0), -deriv(lambda x: pot(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2), -deriv(lambda x: pot(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi), -deriv(lambda x: pot(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 3.7, .7
assert_allclose(pot.zforce(R, z, 0), -deriv(lambda x: pot(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2), -deriv(lambda x: pot(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi), -deriv(lambda x: pot(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
pot = spiral(amp=13, N=3, alpha=-.3, r_ref=0.5, phi_ref=0.3, Rs=0.7, H=0.7, Cs=[1, 2], omega=3)
# zforce is zero in the plane of the galaxy
assert_allclose(0, pot.zforce(0.3, 0, 0, 1), rtol=rtol)
assert_allclose(0, pot.zforce(0.6, 0, pi/2, 2), rtol=rtol)
assert_allclose(0, pot.zforce(0.9, 0, pi, 3), rtol=rtol)
assert_allclose(0, pot.zforce(1.2, 0, 2*pi, 4), rtol=rtol)
# test zforce against -dPhi/dz
R, z, t = 1, -.7, 123
assert_allclose(pot.zforce(R, z, 0, t), -deriv(lambda x: pot(R, x, 0, t), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2, t), -deriv(lambda x: pot(R, x, pi/2, t), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi, t), -deriv(lambda x: pot(R, x, pi, t), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(R, x, 3*pi/2, t), z, dx=dx), rtol=rtol)
R, z = 3.7, .7
assert_allclose(pot.zforce(R, z, 0), -deriv(lambda x: pot(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2), -deriv(lambda x: pot(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi), -deriv(lambda x: pot(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
pot = spiral(N=1, alpha=-0.2, r_ref=.5, Cs=[1, 1.5], omega=-3)
# zforce is zero in the plane of the galaxy
assert_allclose(0, pot.zforce(0.3, 0, 0, 123), rtol=rtol)
assert_allclose(0, pot.zforce(0.3, 0, pi/2, -321), rtol=rtol)
assert_allclose(0, pot.zforce(32, 0, pi, 1.23), rtol=rtol)
assert_allclose(0, pot.zforce(0.123, 0, 3.33*pi/2, -3.21), rtol=rtol)
# test zforce against -dPhi/dz
R, z = 1, -1.5
assert_allclose(pot.zforce(R, z, 0), -deriv(lambda x: pot(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2), -deriv(lambda x: pot(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi), -deriv(lambda x: pot(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2.1), -deriv(lambda x: pot(R, x, 3*pi/2.1), z, dx=dx), rtol=rtol)
R, z, t = 3.7, .7, -100
assert_allclose(pot.zforce(R, z, 0, t), -deriv(lambda x: pot(R, x, 0, t), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2, t), -deriv(lambda x: pot(R, x, pi/2, t), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi, t), -deriv(lambda x: pot(R, x, pi, t), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3.4*pi/2, t), -deriv(lambda x: pot(R, x, 3.4*pi/2, t), z, dx=dx), rtol=rtol)
pot = spiral(N=5, r_ref=1.5, phi_ref=0.5, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)])
# zforce is zero in the plane of the galaxy
assert_allclose(0, pot.zforce(0.3, 0, 0), rtol=rtol)
assert_allclose(0, pot.zforce(0.4, 0, pi/2), rtol=rtol)
assert_allclose(0, pot.zforce(0.5, 0, pi*1.1), rtol=rtol)
assert_allclose(0, pot.zforce(0.6, 0, 3*pi/2), rtol=rtol)
# test zforce against -dPhi/dz
R, z = 1, -.7
assert_allclose(pot.zforce(R, z, 0), -deriv(lambda x: pot(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2), -deriv(lambda x: pot(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi), -deriv(lambda x: pot(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 37, 1.7
assert_allclose(pot.zforce(R, z, 0), -deriv(lambda x: pot(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi/2), -deriv(lambda x: pot(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, pi), -deriv(lambda x: pot(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.zforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
def test_phiforce(self):
"""Test phiforce against a numerical derivative -d(Potential) / d(phi)."""
dx = 1e-8
rtol = 1e-5 # relative tolerance
pot = spiral()
R, z = .3, 0
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = .1, -.3
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 3, 7
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2.1), -deriv(lambda x: pot(R, z, x), pi/2.1, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
pot = spiral(N=7, alpha=-0.3, r_ref=0.5, phi_ref=0.3, Rs=0.7, H=0.7, Cs=[1, 1, 1], omega=2*pi)
R, z, t = .3, 0, 1.2
assert_allclose(pot.phiforce(R, z, 0, 0), -deriv(lambda x: pot(R, z, x, 0), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2, t), -deriv(lambda x: pot(R, z, x, t), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi, t), -deriv(lambda x: pot(R, z, x, t), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(R, z, x, t), 3*pi/2, dx=dx), rtol=rtol)
R, z = 1, -.7
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z, t = 3.7, .7, -5.1
assert_allclose(pot.phiforce(R, z, 0, t), -deriv(lambda x: pot(R, z, x, t), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2, t), -deriv(lambda x: pot(R, z, x, t), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi, t), -deriv(lambda x: pot(R, z, x, t), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3.2*pi/2, t), -deriv(lambda x: pot(R, z, x, t), 3.2*pi/2, dx=dx), rtol=rtol)
pot = spiral(N=1, alpha=0.1, phi_ref=0, Cs=[1, 1.5], omega=-.333)
R, z = .3, 0
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3.2*pi/2), -deriv(lambda x: pot(R, z, x), 3.2*pi/2, dx=dx), rtol=rtol)
R, z, t = 1, -.7, 123
assert_allclose(pot.phiforce(R, z, 0, t), -deriv(lambda x: pot(R, z, x, t), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2, t), -deriv(lambda x: pot(R, z, x, t), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi, t), -deriv(lambda x: pot(R, z, x, t), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(R, z, x, t), 3*pi/2, dx=dx), rtol=rtol)
R, z, t = 3, 4, 5
assert_allclose(pot.phiforce(R, z, 0, t), -deriv(lambda x: pot(R, z, x, t), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2, t), -deriv(lambda x: pot(R, z, x, t), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi, t), -deriv(lambda x: pot(R, z, x, t), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2, t), -deriv(lambda x: pot(R, z, x, t), 3*pi/2, dx=dx), rtol=rtol)
pot = spiral(N=4, r_ref=1.5, phi_ref=5, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)])
R, z = .3, 0
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 1, -.7
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 3*pi/2), -deriv(lambda x: pot(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 2.1, .12345
assert_allclose(pot.phiforce(R, z, 0), -deriv(lambda x: pot(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi/2), -deriv(lambda x: pot(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, pi), -deriv(lambda x: pot(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phiforce(R, z, 2*pi), -deriv(lambda x: pot(R, z, x), 2*pi, dx=dx), rtol=rtol)
def test_R2deriv(self):
"""Test R2deriv against a numerical derivative -d(Rforce) / dR."""
dx = 1e-8
rtol = 1e-6 # relative tolerance
pot = spiral()
assert_allclose(pot.R2deriv(1., 0.), -deriv(lambda x: pot.Rforce(x, 0.), 1., dx=dx), rtol=rtol)
R, z = 0.3, 0
assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi/2), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, 3.1*pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3.1*pi/2), R, dx=dx), rtol=rtol)
R, z = 1, -.7
assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi / 2), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, 2*pi), -deriv(lambda x: pot.Rforce(x, z, 2*pi), R, dx=dx), rtol=rtol)
R, z = 5, .9
assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi / 2), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
# pot = spiral(N=1, alpha=-.3, r_ref=.1, phi_ref=pi, Rs=1, H=1, Cs=[1, 2, 3], omega=3)
# assert_allclose(pot.R2deriv(1e-3, 0.), -deriv(lambda x: pot.Rforce(x, 0.), 1e-3, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(1., 0.), -deriv(lambda x: pot.Rforce(x, 0.), 1., dx=dx), rtol=rtol)
# R, z = 0.3, 0
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi/2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
# R, z = 1, -.7
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi / 2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3.1*pi/2), -deriv(lambda x: pot.Rforce(x, z, 3.1*pi/2), R, dx=dx), rtol=rtol)
# R, z = 5, .9
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2.4), -deriv(lambda x: pot.Rforce(x, z, pi / 2.4), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
#
# pot = spiral(N=7, alpha=.1, r_ref=1, phi_ref=1, Rs=1.1, H=.1, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)], omega=-.3)
# assert_allclose(pot.R2deriv(1., 0.), -deriv(lambda x: pot.Rforce(x, 0.), 1., dx=dx), rtol=rtol)
# R, z = 0.3, 0
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi/2), -deriv(lambda x: pot.Rforce(x, z, pi/2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
# R, z = 1, -.7
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi / 2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
# R, z = 5, .9
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi / 2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
#
# pot = spiral(N=4, alpha=pi/2, r_ref=1, phi_ref=1, Rs=.7, H=.77, Cs=[3, 4], omega=-1.3)
# assert_allclose(pot.R2deriv(1e-3, 0.), -deriv(lambda x: pot.Rforce(x, 0.), 1e-3, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(1., 0.), -deriv(lambda x: pot.Rforce(x, 0.), 1., dx=dx), rtol=rtol)
# R, z = 0.3, 0
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi/2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, 3 * pi / 2), -deriv(lambda x: pot.Rforce(x, z, 3*pi/2), R, dx=dx), rtol=rtol)
# R, z = 1, -.7
# assert_allclose(pot.R2deriv(R, z, 0), -deriv(lambda x: pot.Rforce(x, z, 0), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi / 2), -deriv(lambda x: pot.Rforce(x, z, pi / 2), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, pi), -deriv(lambda x: pot.Rforce(x, z, pi), R, dx=dx), rtol=rtol)
# assert_allclose(pot.R2deriv(R, z, .33*pi/2), -deriv(lambda x: pot.Rforce(x, z, .33*pi/2), R, dx=dx), rtol=rtol)
def test_z2deriv(self):
"""Test z2deriv against a numerical derivative -d(zforce) / dz"""
dx = 1e-8
rtol = 1e-6 # relative tolerance
pot = spiral()
R, z = .3, 0
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 1.2, .1
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
pot = spiral(N=3, alpha=-0.3, r_ref=.25, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)])
R, z = .3, 0
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 3.3, .7
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
pot = spiral(amp=5, N=1, alpha=0.1, r_ref=0.5, phi_ref=0.3, Rs=0.7, H=0.7, Cs=[1, 2], omega=3)
R, z = .3, 0
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 3.3, .7
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
pot = spiral(N=1, alpha=1, r_ref=3, phi_ref=pi, Cs=[1, 2], omega=-3)
R, z = .7, 0
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
R, z = 2.1, .99
assert_allclose(pot.z2deriv(R, z, 0), -deriv(lambda x: pot.zforce(R, x, 0), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi/2), -deriv(lambda x: pot.zforce(R, x, pi/2), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, pi), -deriv(lambda x: pot.zforce(R, x, pi), z, dx=dx), rtol=rtol)
assert_allclose(pot.z2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.zforce(R, x, 3*pi/2), z, dx=dx), rtol=rtol)
def test_phi2deriv(self):
"""Test phi2deriv against a numerical derivative -d(phiforce) / d(phi)."""
dx = 1e-8
rtol = 1e-7 # relative tolerance
pot = spiral()
R, z = .3, 0
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2.1), -deriv(lambda x: pot.phiforce(R, z, x), pi/2.1, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2.5), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2.5, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 3.3, .7
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2.1), -deriv(lambda x: pot.phiforce(R, z, x), pi/2.1, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
pot = spiral(amp=13, N=1, alpha=-.3, r_ref=0.5, phi_ref=0.1, Rs=0.7, H=0.7, Cs=[1, 2, 3], omega=3)
R, z = .3, 0
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3.3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3.3*pi/2, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 3.3, .7
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2.1), -deriv(lambda x: pot.phiforce(R, z, x), pi/2.1, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
pot = spiral(amp=13, N=5, alpha=0.1, r_ref=.3, phi_ref=.1, Rs=0.77, H=0.747, Cs=[3, 2], omega=-3)
R, z = .3, 0
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 1, -.3
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 3.3, .7
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2.1), -deriv(lambda x: pot.phiforce(R, z, x), pi/2.1, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
pot = spiral(amp=11, N=7, alpha=.777, r_ref=7, phi_ref=.7, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)])
R, z = .7, 0
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 1, -.33
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2.2), -deriv(lambda x: pot.phiforce(R, z, x), pi/2.2, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
R, z = 1.123, .123
assert_allclose(pot.phi2deriv(R, z, 0), -deriv(lambda x: pot.phiforce(R, z, x), 0, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi/2.1), -deriv(lambda x: pot.phiforce(R, z, x), pi/2.1, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, pi), -deriv(lambda x: pot.phiforce(R, z, x), pi, dx=dx), rtol=rtol)
assert_allclose(pot.phi2deriv(R, z, 3*pi/2), -deriv(lambda x: pot.phiforce(R, z, x), 3*pi/2, dx=dx), rtol=rtol)
def test_dens(self):
"""Test dens against density obtained using Poisson's equation."""
rtol = 1e-2 # relative tolerance (this one isn't as precise)
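        # forcepoisson=True rebuilds the density from second derivatives of the potential
        # via Poisson's equation (rho = nabla^2 Phi / (4 pi) in galpy's internal units),
        # so this cross-checks the closed-form density expression used by dens().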
pot = spiral()
assert_allclose(pot.dens(1, 0, 0, forcepoisson=False), pot.dens(1, 0, 0, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1, 1, .5, forcepoisson=False), pot.dens(1, 1, .5, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1, -1, -1, forcepoisson=False), pot.dens(1, -1, -1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(.1, .1, .1, forcepoisson=False), pot.dens(.1, .1, .1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(33, .777, .747, forcepoisson=False), pot.dens(33, .777, .747, forcepoisson=True), rtol=rtol)
pot = spiral(amp=3, N=5, alpha=.3, r_ref=.7, omega=5)
assert_allclose(pot.dens(1, 0, 0, forcepoisson=False), pot.dens(1, 0, 0, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1.2, 1.2, 1.2, forcepoisson=False), pot.dens(1.2, 1.2, 1.2, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1, -1, -1, forcepoisson=False), pot.dens(1, -1, -1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(.1, .1, .1, forcepoisson=False), pot.dens(.1, .1, .1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(33.3, .007, .747, forcepoisson=False), pot.dens(33.3, .007, .747, forcepoisson=True), rtol=rtol)
pot = spiral(amp=0.6, N=3, alpha=.24, r_ref=1, phi_ref=pi, Cs=[8./(3.*pi), 0.5, 8./(15.*pi)], omega=-3)
assert_allclose(pot.dens(1, 0, 0, forcepoisson=False), pot.dens(1, 0, 0, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1, 1, 1, forcepoisson=False), pot.dens(1, 1, 1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1, -1, -1, forcepoisson=False), pot.dens(1, -1, -1, forcepoisson=True), rtol=rtol)
# assert_allclose(pot.dens(.1, .1, .1, forcepoisson=False), pot.dens(.1, .1, .1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(3.33, -7.77, -.747, forcepoisson=False), pot.dens(3.33, -7.77, -.747, forcepoisson=True), rtol=rtol)
pot = spiral(amp=100, N=4, alpha=pi/2, r_ref=1, phi_ref=1, Rs=7, H=77, Cs=[3, 1, 1], omega=-1.3)
assert_allclose(pot.dens(1, 0, 0, forcepoisson=False), pot.dens(1, 0, 0, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(3, 2, pi, forcepoisson=False), pot.dens(3, 2, pi, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(1, -1, -1, forcepoisson=False), pot.dens(1, -1, -1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(.1, .123, .1, forcepoisson=False), pot.dens(.1, .123, .1, forcepoisson=True), rtol=rtol)
assert_allclose(pot.dens(333, -.777, .747, forcepoisson=False), pot.dens(333, -.777, .747, forcepoisson=True), rtol=rtol)
def test_Rzderiv(self):
"""Test Rzderiv against a numerical derivative."""
dx = 1e-8
rtol = 1e-6
pot = spiral()
R, z, phi, t = 1, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 0.7, 0.3, pi/3, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.1, -0.3, pi/4.2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .777, .747, .343, 2.5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 12, 1, 2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 3, 4, 5, 6
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 5, -.7, 3*pi/2, 5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 11, 11, 11, 1.123
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 4, 7, 2, 10000
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .01, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.23, 0, 44, 343
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 7, 7, 7, 7
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
pot = spiral(amp=13, N=7, alpha=.1, r_ref=1.123, phi_ref=.3, Rs=0.777, H=.5, Cs=[4.5], omega=-3.4)
R, z, phi, t = 1, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .777, 0.333, pi/3, 0.
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.1, -0.3, pi/4.2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .777, .747, .343, 2.5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 12, 1, 2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 3, 4, 5, 6
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 2, -.7, 3*pi/2, 5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 11, 11, 11, 1.123
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 4, 7, 2, 10000
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .01, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.23, 0, 44, 343
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 7, 7, 7, 7
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
pot = spiral(amp=11, N=2, alpha=.777, r_ref=7, Cs=[8.], omega=0.1)
R, z, phi, t = 1, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 0.7, 0.3, pi/12, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.1, -0.3, pi/4.2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .777, .747, .343, 2.5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 2, 1, 2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 3, 4, 5, 6
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 5, -.7, 3*pi/2, 5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 11, 11, 11, 1.123
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 4, 7, 2, 10000
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .01, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.23, 0, 44, 343
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 7, 7, 7, 7
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
pot = spiral(amp=2, N=1, alpha=-0.1, r_ref=5, Rs=5, H=.7, Cs=[3.5], omega=3)
R, z, phi, t = 1, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 0.77, 0.3, pi/3, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 3.1, -0.3, pi/5, 2
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .777, .747, .343, 2.5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 12, 1, 2, 3
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 3, 4, 5, 6
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 5, -.7, 3*pi/2, 5
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 11, 11, 11, 1.123
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 4, 7, 2, 10000
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = .01, 0, 0, 0
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 1.23, 0, 44, 343
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
R, z, phi, t = 7, 7, 7, 7
assert_allclose(pot.Rzderiv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, x, phi, t), z, dx=dx), rtol=rtol)
def test_Rphideriv(self):
"""Test Rphideriv against a numerical derivative."""
dx = 1e-8
rtol = 5e-5
pot = spiral()
R, z, phi, t = 1, 0, 0, 0
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 0.7, 0.3, pi / 3, 0
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 1.1, -0.3, pi / 4.2, 3
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = .777, .747, .343, 2.5
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 12, 1, 2, 3
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 3, 4, 5, 6
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 5, -.7, 3 * pi / 2, 5
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 11, 11, 11, 1.123
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 4, 7, 2, 1000
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = .01, 0, 0, 0
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 1.23, 0, 44, 343
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 7, 1, 7, 7
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
pot = spiral(N=3, alpha=.21, r_ref=.5, phi_ref=pi, Cs=[2.], omega=-3)
R, z, phi, t = 1, 0, 0, 0
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 0.7, 0.3, pi / 3, 0
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 1.1, -0.3, pi / 4.2, 3
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = .777, .747, .343, 2.5
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 12, 1, 2, 3
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 3, 4, 5, 6
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 5, -.7, 3 * pi / 2, 5
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 11, 11, 11, 1.123
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 3, 2, 1, 100
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = .01, 0, 0, 0
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 1.12, 0, 2, 343
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
R, z, phi, t = 7, 7, 7, 7
assert_allclose(pot.Rphideriv(R, z, phi, t), -deriv(lambda x: pot.Rforce(R, z, x, t), phi, dx=dx), rtol=rtol)
def test_OmegaP(self):
sp = spiral()
assert sp.OmegaP() == 0
sp = spiral(N=1, alpha=2, r_ref=.1, phi_ref=.5, Rs=0.2, H=0.7, Cs=[1,2], omega=-123)
assert sp.OmegaP() == -123
sp = spiral(omega=123.456)
assert sp.OmegaP() == 123.456
def test_K(self):
pot = spiral()
R = 1
assert_allclose([pot._K(R)], [pot._ns * pot._N / R / np.sin(pot._alpha)])
R = 1e-6
assert_allclose([pot._K(R)], [pot._ns * pot._N / R / np.sin(pot._alpha)])
R = 0.5
assert_allclose([pot._K(R)], [pot._ns * pot._N / R / np.sin(pot._alpha)])
def test_B(self):
pot = spiral()
R = 1
assert_allclose([pot._B(R)], [pot._K(R) * pot._H * (1 + 0.4 * pot._K(R) * pot._H)])
R = 1e-6
assert_allclose([pot._B(R)], [pot._K(R) * pot._H * (1 + 0.4 * pot._K(R) * pot._H)])
R = 0.3
assert_allclose([pot._B(R)], [pot._K(R) * pot._H * (1 + 0.4 * pot._K(R) * pot._H)])
def test_D(self):
pot = spiral()
assert_allclose([pot._D(3)], [(1. + pot._K(3)*pot._H + 0.3 * pot._K(3)**2 * pot._H**2.) / (1. + 0.3*pot._K(3) * pot._H)])
assert_allclose([pot._D(1e-6)], [(1. + pot._K(1e-6)*pot._H + 0.3 * pot._K(1e-6)**2 * pot._H**2.) / (1. + 0.3*pot._K(1e-6) * pot._H)])
assert_allclose([pot._D(.5)], [(1. + pot._K(.5)*pot._H + 0.3 * pot._K(.5)**2 * pot._H**2.) / (1. + 0.3*pot._K(.5) * pot._H)])
def test_dK_dR(self):
pot = spiral()
dx = 1e-8
assert_allclose(pot._dK_dR(3), deriv(pot._K, 3, dx=dx))
assert_allclose(pot._dK_dR(2.3), deriv(pot._K, 2.3, dx=dx))
assert_allclose(pot._dK_dR(-2.3), deriv(pot._K, -2.3, dx=dx))
def test_dB_dR(self):
pot = spiral()
dx = 1e-8
assert_allclose(pot._dB_dR(3.3), deriv(pot._B, 3.3, dx=dx))
assert_allclose(pot._dB_dR(1e-3), deriv(pot._B, 1e-3, dx=dx))
assert_allclose(pot._dB_dR(3), deriv(pot._B, 3, dx=dx))
def test_dD_dR(self):
pot = spiral()
dx = 1e-8
assert_allclose(pot._dD_dR(1e-3), deriv(pot._D, 1e-3, dx=dx))
assert_allclose(pot._dD_dR(2), deriv(pot._D, 2, dx=dx))
def test_gamma(self):
pot = spiral()
R, phi = 1, 2
assert_allclose(pot._gamma(R, phi), [pot._N * (float(phi) - pot._phi_ref - np.log(float(R) / pot._r_ref) /
np.tan(pot._alpha))])
R , phi = .1, -.2
assert_allclose(pot._gamma(R, phi), [pot._N * (float(phi) - pot._phi_ref - np.log(float(R) / pot._r_ref) /
np.tan(pot._alpha))])
R, phi = 0.01, 0
assert_allclose(pot._gamma(R, phi), [pot._N * (float(phi) - pot._phi_ref - np.log(float(R) / pot._r_ref) /
np.tan(pot._alpha))])
def test_dgamma_dR(self):
pot = spiral()
dx = 1e-8
assert_allclose(pot._dgamma_dR(3.), deriv(lambda x: pot._gamma(x, 1), 3., dx=dx))
assert_allclose(pot._dgamma_dR(3), deriv(lambda x: pot._gamma(x, 1), 3, dx=dx))
assert_allclose(pot._dgamma_dR(0.01), deriv(lambda x: pot._gamma(x, 1), 0.01, dx=dx))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSpiralArmsPotential)
unittest.TextTestRunner(verbosity=2).run(suite)
|
460618
|
import asyncio
import concurrent.futures
import logging
import multiprocessing
import time
from enum import Enum, auto
import cv2
from .aruco_marker import ArucoMarker
MAX_READ_FAILURES_PER_INIT = 3
LOOPBACK_DEV_PATH = "/dev/video21"
class CapComm(Enum):
"""For communication between ArucoDetectionProcess and ArucoDetector"""
INIT_SUCCESS = auto()
INIT_FAILURE = auto()
ARUCO_REQUEST = auto()
CROP_REQUEST = auto()
RELEASE_REQUEST = auto()
RELEASED = auto()
class ArucoDetectionProcess:
"""Separated cv2.VideoCapture process class with aruco marker detection
Should be started with multiprocessing.Process(... daemon=True), so it
won't block exit if the main process fails.
Before usage: `pip install numpy opencv-contrib-python`
:param source: Camera id or path
:type source: String/Int
:param conn: multiprocessing.connection.Pipe() one end of the connection
:type conn: multiprocessing.connection.Connection
"""
def __init__(
self, source, conn, api_preference, vertical_flip, horizontal_flip
): # noqa: N803
self._source = source
self._conn = conn
self._api_preference = api_preference
self.arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_5X5_50)
self.arucoParams = cv2.aruco.DetectorParameters_create()
self.crop_params = (0, 0, 0, 0)
self._vertical_flip = vertical_flip
self._horizontal_flip = horizontal_flip
def run(self):
# initialize self._cap cv2.VideoCapture
self._init_cap(True)
while True:
# wait until a new request
req = self._conn.recv()
# respond to the request
if req == CapComm.ARUCO_REQUEST:
frame = self._read()
self._conn.send(frame)
            elif req == CapComm.CROP_REQUEST:
self.crop_params = self._conn.recv()
self.patch_size = (
int(self.crop_params[0] - self.crop_params[1]),
int(self.crop_params[2] - self.crop_params[3]),
)
self.center = (
self.crop_params[1]
+ (self.crop_params[0] - self.crop_params[1]) / 2,
self.crop_params[3]
+ (self.crop_params[2] - self.crop_params[3]) / 2,
)
elif req == CapComm.RELEASE_REQUEST:
self._cap.release()
self._conn.send(CapComm.RELEASED)
break
def _create_cap(self):
self._cap = cv2.VideoCapture(self._source, self._api_preference)
self._cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
# When not using loopback device, default resolution can be 1080p
# which is too much for raspi to handle with streamer also running
if self._source != LOOPBACK_DEV_PATH:
self._cap.set(
cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc("M", "J", "P", "G")
)
self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
self.resolution = (
self._cap.get(cv2.CAP_PROP_FRAME_WIDTH),
self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
)
def _init_cap(self, send):
self._create_cap()
if not self._cap.isOpened():
if send:
self._conn.send(CapComm.INIT_FAILURE)
raise RuntimeError(f"Could not open camera '{self._source}'")
self._conn.send(CapComm.INIT_SUCCESS)
def _read(self):
# TODO: handle same ID appearing multiple times
success = False
markers = []
success, frame = self._cap.read()
if not success or len(frame) == 0:
return markers
if self._vertical_flip and self._horizontal_flip:
frame = cv2.flip(frame, -1)
elif self._vertical_flip:
frame = cv2.flip(frame, 0)
elif self._horizontal_flip:
frame = cv2.flip(frame, 1)
if any(self.crop_params):
frame = cv2.getRectSubPix(frame, self.patch_size, self.center)
(corners, ids, rejected) = cv2.aruco.detectMarkers(
frame, self.arucoDict, parameters=self.arucoParams
)
if len(corners) == 0:
return markers
ids = ids.flatten()
markers = [
ArucoMarker(ids[i], corners[i][0], self.resolution)
for i in range(len(ids))
]
return markers
class ArucoDetector:
"""Non-blocking aruco detector
Only one ArucoDetector instance can exist per camera. If aruco markers are
used in more than one location, subscribe the users to the same
ArucoDetector instance. Use factory method
'await ArucoDetector.create(source, ...)' instead of __init__.
"""
@classmethod
async def create(
cls,
source=LOOPBACK_DEV_PATH,
init_timeout=2,
read_timeout=2,
release_timeout=2,
process_class=ArucoDetectionProcess,
api_preference=cv2.CAP_V4L2, # noqa: N803
vertical_flip=False,
horizontal_flip=False,
):
"""Factory method for ArucoDetector, use this instead of __init__
:param source: Camera id or path. Defaults to streamer main camera
loopback device on Surrogate image ("/dev/video21").
:type source: String/Int
:param init_timeout: Max time to wait for VideoCapture init,
otherwise RuntimeError will be raised, defaults to 2
        :type init_timeout: int, optional
:param read_timeout: Max time to wait for frame in seconds, after the
timeout VideoCapture will be released and reinitialized, defaults
to 2
:type read_timeout: int, optional
:param release_timeout: Max time to wait for VideoCapture release,
otherwise SIGKILL will be sent, defaults to 2
:type release_timeout: int, optional
:param process_class: Video capture process class implementation,
option mainly for easier testing
        :type process_class: ArucoDetectionProcess, optional
:param api_preference: backend api_preference for cv2.VideoCapture,
defaults to cv2.CAP_V4L2
:type api_preference: cv2 VideoCaptureAPI, optional
:param vertical_flip: Flip frames vertically before aruco detection.
Defaults to False
:type vertical_flip: boolean, optional
:param horizontal_flip: Flip frames horizontally before aruco
detection. Defaults to False
:type horizontal_flip: boolean, optional
"""
self = cls()
# save the correct state
self._source = source
self._init_timeout = init_timeout
self._read_timeout = read_timeout
self._release_timeout = release_timeout
self._process_class = process_class
self._api_preference = api_preference
self._vertical_flip = vertical_flip
self._horizontal_flip = horizontal_flip
self._released = False
self._start_time = None
self.callbacks = []
# initialize and start video_capture_process
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self._loop = asyncio.get_event_loop()
await self._start_process()
return self
def register_observer(self, callback):
"""Register to receive all found aruco markers.
:param callback: Function that will do something with the markers
:type callback: Function that takes a list of aruco markers
"""
logging.info("aruco detect registering observer")
self.callbacks.append(callback)
def unregister_observer(self, callback):
"""Unregister from receiving aruco markers
:param callback: Function that will do something with the markers
:type callback: Function that takes a list of aruco markers
"""
try:
self.callbacks.remove(callback)
except ValueError as e:
logging.info(f"error while removing aruco callback: {e}")
pass
def unregister_all_observers(self):
"""Unregister all callbacks from receiving aruco markers"""
self.callbacks = []
def set_crop(self, crop_params):
"""Set cropping for frames used for aruco detection. Useful for
increasing performance.
:param crop_params: Tuple of pixel coordinates:
(max_x, min_x, max_y, min_y), representing the area of the frame
left after cropping.
:type crop_params: Tuple of four floats
"""
if len(self.callbacks) != 0:
logging.error("Unable to set aruco cropping while reading frames")
return
self._conn_main.send(CapComm.CROP_REQUEST)
self._conn_main.send(crop_params)
def _detect_cb(self, found_markers):
for callback in self.callbacks:
callback(found_markers)
async def _start_process(self):
self._conn_main, self._conn_process = multiprocessing.Pipe()
cap_process = self._process_class(
self._source,
self._conn_process,
self._api_preference,
self._vertical_flip,
self._horizontal_flip,
)
self._cap_process = multiprocessing.Process(
target=cap_process.run,
daemon=True,
name="SRTG Controller video capture",
)
self._cap_process.start()
await self._verify_process_start()
self._image_rec_task = asyncio.create_task(self._read())
async def _verify_process_start(self):
response = await self._get_response(self._init_timeout)
if response is CapComm.INIT_SUCCESS:
logging.info(f"Camera '{self._source}' opened successfully")
elif response is CapComm.INIT_FAILURE:
raise RuntimeError(
f"Could not open camera '{self._source}' after initialization"
)
else: # None
raise RuntimeError(
f"Camera '{self._source}' initialization took more than "
f"init_timeout ({self._init_timeout}) seconds"
)
async def _restart_process(self):
await self._release()
await self._start_process()
async def _get_response(self, timeout):
"""Gets response in timeout seconds or returns None"""
if await self._loop.run_in_executor(
self._executor, self._conn_main.poll, timeout
):
return self._conn_main.recv()
else:
return None
async def _read(self):
"""Keeps requesting aruco markers until released"""
        while True:
if self._released:
logging.info(
"ArucoDetector has been released, stopping detection"
)
break
# TODO: This isn't efficient. Fix the system to safely create and
# cancel this task as needed.
if len(self.callbacks) > 0:
# send aruco request
self._conn_main.send(CapComm.ARUCO_REQUEST)
# get response
markers = await self._get_response(self._read_timeout)
if markers is not None and len(markers):
self._detect_cb(markers)
await asyncio.sleep(0.1)
async def __aenter__(self):
return self.frames()
async def __aexit__(self, type, value, traceback):
await self.release()
async def release(self):
"""Release resources"""
await self._release()
self._released = True
# release the executor
self._executor.shutdown()
async def _release(self):
# send release request
self._conn_main.send(CapComm.RELEASE_REQUEST)
# wait for the response, measure the time it took
start = time.time()
response = await self._get_response(self._release_timeout)
end = time.time()
response_time = end - start
logging.info(response)
# check if actually responded to old
# CapComm.ARUCO_REQUEST
        if (
            isinstance(response, list)
            and isinstance(response[0], ArucoMarker)
            and response_time < self._release_timeout
        ):
            # the response was to an old aruco request and there is still some
            # of the release timeout left, so wait the remainder for
            # CapComm.RELEASED
            logging.info("got resp to old frame")
            response = await self._get_response(
                self._release_timeout - response_time
            )
if response == CapComm.RELEASED:
logging.info(f"Camera '{self._source}' released")
self._cap_process.join() # should join immediately
else: # None
logging.warning(
f"VideoCapture did not release in {self._release_timeout} "
"seconds, must be killed"
)
self._cap_process.kill()
await self._loop.run_in_executor(
self._executor, self._cap_process.join
)
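# --- Illustrative usage sketch (not part of the original module) ---
# Assumptions: run under an asyncio event loop with a camera available at the
# default loopback device; the print callback stands in for a real consumer and
# the crop rectangle values are arbitrary examples.
if __name__ == "__main__":
    async def _demo():
        detector = await ArucoDetector.create()
        # crop_params is (max_x, min_x, max_y, min_y); set it before observers
        # are registered, otherwise set_crop() refuses to change it
        detector.set_crop((1280, 0, 720, 0))
        detector.register_observer(lambda markers: print(markers))
        await asyncio.sleep(5)
        await detector.release()
    asyncio.run(_demo())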
|
460635
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import json
filter_arg = json.loads(demisto.args().get("filter", json.dumps({"tags": ["report"]})))
raw_entries = None
if filter_arg:
raw_entries = demisto.executeCommand('getEntries', {"id": demisto.incident().get("id"), "filter": filter_arg})
if raw_entries:
entries = []
for entry in raw_entries:
entries.append(str(entry["Contents"]))
else:
entries = ["No entries tagged with 'report' tag"]
# demisto.results(str(entries))
result = {
'Type': entryTypes["note"],
'Contents': "\n".join(entries),
'ContentsFormat': formats['markdown'],
'HumanReadable': "\n".join(entries),
'ReadableContentsFormat': formats['markdown']
}
demisto.results(result)
|
460658
|
import matplotlib.pyplot as plt
import numpy as np
from . import common
def plot(soln):
soln = common.numpyify(soln)
fig, axes = plt.subplots(2, 3)
fig.set_size_inches(12, 8)
ax = axes[0, 0]
ax.errorbar(
np.arange(soln.μd.shape[0]),
soln.μd[:, 0], yerr=soln.σd[0, :], marker='.', linestyle='')
ax.set_xlim(0, len(soln.μ)-1)
ax.set_title('μ vs. first agent')
ax.grid(True, axis='x')
ax = axes[0, 1]
ax.imshow(np.where(soln.σd > 0, soln.σd, np.nan))
ax.set_title('σd')
ax = axes[1, 0]
ax.plot(soln.trace.l)
ax.set_xlim(0, len(soln.trace.l)-1)
ax.set_title('loss')
ax.set_yscale('log')
ax.grid(True, axis='x')
ax = axes[1, 1]
ax.plot(soln.trace.relnorm)
ax.set_xlim(0, len(soln.trace.relnorm)-1)
ax.set_yscale('log')
ax.set_title('norms')
ax.grid(True, axis='x')
ax = axes[0, 2]
ax.imshow(np.log10(np.where(soln.n >= 1, soln.n, np.nan)))
ax.set_title('log(games played)')
ax = axes[1, 2]
ratio = np.full_like(soln.n, np.nan)
np.divide(soln.w, soln.n, where=soln.n > 0, out=ratio)
ax.imshow(ratio, vmin=0, vmax=1, cmap='RdBu')
ax.set_title('winrate')
return fig
|
460660
|
import jumpscale.packages.vdc_dashboard.bottle.api.root
import jumpscale.packages.vdc_dashboard.bottle.api.backup
import jumpscale.packages.vdc_dashboard.bottle.api.deployments
import jumpscale.packages.vdc_dashboard.bottle.api.export
## now that we have loaded all of the submodules and registered more endpoints
## on top of /vdc_dashboard/api, we define the app value that will be registered as a
## subapp on the jsmainapp for the 3bot, which runs on port 31000
app = jumpscale.packages.vdc_dashboard.bottle.api.root.app
|
460670
|
from __future__ import print_function
import os
import sys
import random
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image
class ListDataset(data.Dataset):
'''Load image/labels/boxes from a list file.
The list file is like:
a.jpg xmin ymin xmax ymax label xmin ymin xmax ymax label ...
'''
def __init__(self, root, list_file, transform=None):
'''
Args:
          root: (str) directory to images.
list_file: (str/[str]) path to index file.
transform: (function) image/box transforms.
'''
self.root = root
self.transform = transform
self.fnames = []
self.boxes = []
self.labels = []
if isinstance(list_file, list):
# Cat multiple list files together.
# This is especially useful for voc07/voc12 combination.
tmp_file = '/tmp/listfile.txt'
os.system('cat %s > %s' % (' '.join(list_file), tmp_file))
list_file = tmp_file
with open(list_file) as f:
lines = f.readlines()
self.num_imgs = len(lines)
for line in lines:
splited = line.strip().split()
self.fnames.append(splited[0])
num_boxes = (len(splited) - 1) // 5
box = []
label = []
for i in range(num_boxes):
xmin = splited[1+5*i]
ymin = splited[2+5*i]
xmax = splited[3+5*i]
ymax = splited[4+5*i]
c = splited[5+5*i]
box.append([float(xmin),float(ymin),float(xmax),float(ymax)])
label.append(int(c))
self.boxes.append(torch.Tensor(box))
self.labels.append(torch.LongTensor(label))
def __getitem__(self, idx):
'''Load image.
Args:
idx: (int) image index.
Returns:
img: (tensor) image tensor.
boxes: (tensor) bounding box targets.
labels: (tensor) class label targets.
'''
# Load image and boxes.
fname = self.fnames[idx]
img = Image.open(os.path.join(self.root, fname))
if img.mode != 'RGB':
img = img.convert('RGB')
boxes = self.boxes[idx].clone() # use clone to avoid any potential change.
labels = self.labels[idx].clone()
if self.transform:
img, boxes, labels = self.transform(img, boxes, labels)
return img, boxes, labels
def __len__(self):
return self.num_imgs
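# --- Illustrative usage sketch (not part of the original module) ---
# Assumptions: './images' and './train_list.txt' are hypothetical paths; each
# line of the list file follows the format documented above, for example:
#   a.jpg 10 20 110 220 0 30 40 130 240 1
if __name__ == '__main__':
    dataset = ListDataset(root='./images', list_file='./train_list.txt')
    img, boxes, labels = dataset[0]  # PIL image (no transform), box/label tensors
    print(len(dataset), boxes.shape, labels.shape)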
|
460715
|
import unittest
import logging
from mock import Mock
from rollingpin.utils import swallow_exceptions
class TestUtils(unittest.TestCase):
def test_swallow_exception_on_error(self):
logger = Mock()
exception = Exception("fail")
with swallow_exceptions("tester", logger):
raise exception
logger.warning.assert_called_with('%s: %s', 'tester', exception)
def test_swallow_exception_no_error(self):
logger = Mock()
with swallow_exceptions("tester", logger):
pass
logger.warning.assert_not_called()
|
460757
|
import os
ALLOWED_HOSTS = ['*']
BASE_DIR = os.path.dirname(__file__)
SECRET_KEY = "007"
INSTALLED_APPS = [
# Default Django apps
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"bulma",
"showcase"
]
ROOT_URLCONF = "test_project.urls"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware", # required for django.contrib.admin
"django.contrib.messages.middleware.MessageMiddleware", # required for django.contrib.admin
)
STATIC_URL = "/static/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
]
},
}
]
|
460772
|
from datetime import datetime
def today():
time = datetime.now()
return time.strftime("%Y%m%d")
def timestamp():
time = datetime.now()
return time.strftime("%Y%m%d%H%M%S%f")
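# --- Illustrative usage sketch (not part of the original module) ---
# Output values below are hypothetical; both helpers format datetime.now().
if __name__ == "__main__":
    print(today())      # e.g. '20210704'
    print(timestamp())  # e.g. '20210704173015123456'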
|
460830
|
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import ValidationError
from dynamicforms import serializers
from dynamicforms.action import Actions, TableAction, TablePosition
from dynamicforms.viewsets import ModelViewSet
from ..models import Validated
class ValidatedSerializer(serializers.ModelSerializer):
form_titles = {
'table': 'Validated list',
'new': 'New validated object',
'edit': 'Editing validated object',
}
actions = Actions(
TableAction(TablePosition.HEADER, label=_('+ Add (refresh record)'), title=_('Add new record'),
action_js="dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'"
", 'record', __TABLEID__);"),
TableAction(TablePosition.HEADER, label=_('+ Add (refresh table)'), title=_('Add new record'),
action_js="dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'"
", 'table', __TABLEID__);"),
TableAction(TablePosition.HEADER, label=_('+ Add (no refresh)'), title=_('Add new record'),
action_js="dynamicforms.newRow('{% url url_reverse|add:'-detail' pk='new' format='html' %}'"
", 'no refresh', __TABLEID__);"),
TableAction(TablePosition.ROW_CLICK, label=_('Edit'), title=_('Edit record'),
action_js="dynamicforms.editRow('{% url url_reverse|add:'-detail' pk='__ROWID__' format='html'"
" %}'.replace('__ROWID__', $(event.target.parentElement).closest('tr[class=\"df-table-row\"]').attr('data-id'))"
", 'record', __TABLEID__);"),
TableAction(TablePosition.ROW_END, label=_('Delete (refresh record)'), title=_('Delete record'),
action_js="dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', "
+ "{{row.id}}, 'record', __TABLEID__);"),
TableAction(TablePosition.ROW_END, label=_('Delete (refresh table)'), title=_('Delete record'),
action_js="dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', "
+ "{{row.id}}, 'table', __TABLEID__);"),
TableAction(TablePosition.ROW_END, label=_('Delete (no refresh)'), title=_('Delete record'),
action_js="dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', "
+ "{{row.id}}, 'no refresh', __TABLEID__);"),
# The following action is duplicated unnecessarily just to later eliminate it in suppress_action
TableAction(TablePosition.ROW_END, name='del 1', label=_('Delete (no refresh)'), title=_('Delete record'),
action_js="dynamicforms.deleteRow('{% url url_reverse|add:'-detail' pk=row.id %}', "
+ "{{row.id}}, 'no refresh', __TABLEID__);")
)
def validate(self, attrs):
attrs = super().validate(attrs)
if attrs['amount'] != 5:
if attrs['code'] != '123':
raise ValidationError({'amount': 'amount can only be different than 5 if code is "123"'})
if attrs['enabled'] is True and attrs['item_type'] == 3:
raise ValidationError('When enabled you can only choose from first three item types')
return attrs
def suppress_action(self, action, request, viewset):
if action.name == 'del 1':
return True
return super().suppress_action(action, request, viewset)
class Meta:
model = Validated
exclude = ()
class ValidatedViewSet(ModelViewSet):
template_context = dict(url_reverse='validated')
queryset = Validated.objects.all()
serializer_class = ValidatedSerializer
|
460833
|
import datetime
import json
import os
import re
import subprocess
from shutil import copyfile
import yaml
from buildtest.config import SiteConfiguration
from buildtest.defaults import console
from buildtest.exceptions import BuildTestError, ConfigurationError
from buildtest.schemas.defaults import custom_validator, schema_table
from buildtest.utils.tools import deep_get
from lmod.module import Module
from lmod.spider import Spider
from rich.syntax import Syntax
def compiler_cmd(args, configuration):
if args.compilers == "find":
compiler_find(args, configuration)
return
bc = BuildtestCompilers(configuration)
if args.json is False and args.yaml is False:
bc.print_compilers()
if args.json:
bc.print_json()
if args.yaml:
bc.print_yaml()
def compiler_find(args, configuration):
"""This method implements ``buildtest config compilers find`` which detects
new compilers based on module names defined in configuration. If system has
Lmod we use Lmodule API to detect the compilers. For environment-modules we
search for all modules in current ``$MODULEPATH``.
"""
bc = BuildtestCompilers(debug=args.debug, configuration=configuration)
bc.find_compilers()
# configuration["compilers"]["compiler"] = bc.compilers
configuration.target_config["compilers"]["compiler"] = bc.compilers
system = configuration.name()
# delete system entry
del configuration.config["system"][system]
configuration.config["system"][system] = configuration.target_config
custom_validator(
configuration.config, schema_table["settings.schema.json"]["recipe"]
)
syntax = Syntax(
yaml.safe_dump(configuration.config, default_flow_style=False, sort_keys=False),
"yaml",
theme="emacs",
)
console.print(syntax)
# if --update is specified we update existing configuration file and write backup in same directory
if args.update:
fname = (
"buildtest_"
+ datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+ ".yml"
)
backup_file = os.path.join(os.path.dirname(configuration.file), fname)
copyfile(configuration.file, backup_file)
print("Writing backup configuration file to: ", backup_file)
print(f"Updating configuration file: {configuration.file}")
with open(configuration.file, "w") as fd:
yaml.safe_dump(
configuration.config,
fd,
default_flow_style=False,
sort_keys=False,
)
class BuildtestCompilers:
compiler_table = {
"gcc": {"cc": "gcc", "cxx": "g++", "fc": "gfortran"},
"intel": {"cc": "icc", "cxx": "icpc", "fc": "ifort"},
"pgi": {"cc": "pgcc", "cxx": "pgc++", "fc": "pgfortran"},
"cray": {"cc": "cc", "cxx": "CC", "fc": "ftn"},
"clang": {"cc": "clang", "cxx": "clang++", "fc": "None"},
"cuda": {"cc": "nvcc", "cxx": "nvcc", "fc": "None"},
"upcxx": {"cc": "upcxx", "cxx": "upcxx", "fc": "None"},
"nvhpc": {"cc": "nvc", "cxx": "nvcc", "fc": "nvfortran"},
}
def __init__(self, configuration, settings_file=None, debug=False):
"""
        :param configuration: loaded buildtest configuration used to read the compiler section
        :type configuration: SiteConfiguration
        :param settings_file: Specify an alternate settings file to use when finding compilers
        :type settings_file: str, optional
        :param debug: print additional debug messages while finding compilers
        :type debug: bool, optional
"""
self.configuration = configuration
# if settings_file is provided, let's load settings into SiteConfiguration
# and set self.configuration to loaded configuration
if settings_file:
bc = SiteConfiguration(settings_file)
bc.detect_system()
bc.validate()
self.configuration = bc
self.debug = debug
if not deep_get(self.configuration.target_config, "compilers", "compiler"):
raise BuildTestError("compiler section not defined")
self.compilers = self.configuration.target_config["compilers"]["compiler"]
self._names = []
self.compiler_name_to_group = {}
for name in self.compilers:
if isinstance(self.compilers[name], dict):
self._names += self.compilers[name].keys()
for compiler in self.compilers[name].keys():
self.compiler_name_to_group[compiler] = name
def find_compilers(self):
"""This method returns compiler modules discovered depending on your module system.
If you have Lmod system we use spider utility to detect modules, this is leveraging
Lmodule API. If you have environment-modules we parse output of ``module av -t``.
:return: return a list of compiler modules detected based on module key name.
:rtype: dict
"""
self.moduletool = self.configuration.target_config.get("moduletool")
if self.moduletool == "N/A" or not self.moduletool:
raise ConfigurationError(
self.configuration.config,
self.configuration.file,
"You must have environment-modules or lmod to use this tool. Please specify 'moduletool' in your configuration",
)
# The 'find' section is required for discovering new compilers
if not self.configuration.target_config["compilers"].get("find"):
raise ConfigurationError(
self.configuration.config,
self.configuration.file,
"Compiler 'find' section not detected, we are unable to search for compilers.",
)
module_dict = {}
print(f"MODULEPATH: {os.getenv('MODULEPATH')}")
        # First we discover modules; if it's Lmod we use the Lmodule API class Spider to retrieve modules
if self.moduletool == "lmod":
if self.debug:
print("Searching modules via Lmod Spider")
spider = Spider()
spider_modules = list(spider.get_modules().values())
            for name, module_regex_pattern in self.configuration.target_config[
                "compilers"
            ]["find"].items():
                module_dict[name] = []
                raw_string = r"{}".format(module_regex_pattern)
for module_fname in spider_modules:
if self.debug:
print(
f"Applying regex {raw_string} with module: {module_fname}"
)
if re.match(raw_string, module_fname):
module_dict[name].append(module_fname)
# for environment-modules we retrieve modules by parsing output of 'module av -t'
elif self.moduletool == "environment-modules":
module_av = "module av -t"
if self.debug:
print(f"Searching modules by parsing content of command: {module_av}")
cmd = subprocess.getoutput("module av -t")
modules = cmd.split()
            # discover all modules matching the regex patterns specified in the
            # 'find' field
for compiler, module_regex_pattern in self.configuration.target_config[
"compilers"
]["find"].items():
module_dict[compiler] = []
raw_string = r"{}".format(module_regex_pattern)
# apply regex against all modules, some modules have output with
# (default) in that case we replace with empty string
module_dict[compiler] += [
module.replace("(default)", "")
for module in modules
if re.match(raw_string, module)
]
# ignore entry where value is empty list
module_dict = {k: v for k, v in module_dict.items() if v}
if not module_dict:
raise BuildTestError("No modules discovered")
self._validate_modules(module_dict)
self._update_compiler_section()
def _validate_modules(self, module_dict):
"""This method will validate modules by running ``module load`` test for all
discovered modules specified in parameter ``discovered_modules``. This method
returns a list of modules that were valid, if all tests pass we return the same
list. A module test pass if we get a returncode 0.
"""
if self.debug:
print(f"Testing all discovered modules: {list(module_dict.values())}")
self.compiler_modules_lookup = {}
# test all modules via 'module load' and add only modules that passed (ret: 0)
for name, module_list in module_dict.items():
self.compiler_modules_lookup[name] = []
for module in module_list:
cmd = Module(module, debug=self.debug)
ret = cmd.test_modules(login=True)
# if module load test passed we add entry to list
if ret == 0:
self.compiler_modules_lookup[name].append(module)
def _update_compiler_section(self):
"""This method will update the compiler section by adding new compilers if
found
:return: Updated compiler section for buildtest configuration
:rtype: dict
"""
for name, module_list in self.compiler_modules_lookup.items():
if not self.compilers.get(name):
self.compilers[name] = {}
for module in module_list:
                # if it's a new compiler entry, add a new entry to the dict
if module not in self.compilers.get(name).keys():
self.compilers[name][module] = self.compiler_table[name].copy()
# define module section for each compiler. This setting is automatically
# set by buildtest but user may want to tweak this later.
self.compilers[name][module]["module"] = {}
self.compilers[name][module]["module"]["load"] = [module]
self.compilers[name][module]["module"]["purge"] = False
def print_json(self):
"""Prints compiler section in JSON, this implements ``buildtest config compilers --json``"""
print(json.dumps(self.compilers, indent=2))
def print_yaml(self):
"""Prints compiler section in YAML, this implements ``buildtest config compilers --yaml``"""
print(yaml.dump(self.compilers, default_flow_style=False))
def names(self):
"""Return a list of compiler names defined in buildtest configuration"""
return self._names
def print_compilers(self):
"""This method implements ``buildtest config compilers`` which
prints all compilers from buildtest configuration
"""
for name in self._names:
print(name)
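# --- Illustrative configuration sketch (not part of the original module) ---
# find_compilers() expects the "compilers" section to contain a "find" mapping
# of compiler group name -> regular expression matched against module names.
# A hypothetical YAML fragment consistent with that lookup (the authoritative
# schema is settings.schema.json):
#
#   compilers:
#     find:
#       gcc: ^(gcc)
#       intel: ^(intel)
#     compiler:
#       gcc: {}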
|
460843
|
from django.conf import settings
import os
def get_project_root():
""" get the project root directory """
settings_mod = __import__(settings.SETTINGS_MODULE, {}, {}, [''])
return os.path.dirname(os.path.abspath(settings_mod.__file__))
|
460850
|
import cirq
from cirq.contrib.quantum_volume import QuantumVolumeResult
from cirq.testing import assert_json_roundtrip_works
from cirq.contrib.json import DEFAULT_CONTRIB_RESOLVERS
from cirq.contrib.acquaintance import SwapPermutationGate
def test_quantum_volume():
qubits = cirq.LineQubit.range(5)
qvr = QuantumVolumeResult(
model_circuit=cirq.Circuit(cirq.H.on_each(qubits)),
heavy_set=[1, 2, 3],
compiled_circuit=cirq.Circuit(cirq.H.on_each(qubits)),
sampler_result=.1)
assert_json_roundtrip_works(qvr, resolvers=DEFAULT_CONTRIB_RESOLVERS)
def test_swap_permutation_gate():
gate = SwapPermutationGate(swap_gate=cirq.SWAP)
assert_json_roundtrip_works(gate, resolvers=DEFAULT_CONTRIB_RESOLVERS)
|
460855
|
from django.contrib.auth.models import User
from django.template.loaders.app_directories import Loader
class LoaderWithSQL(Loader):
def get_template(self, *args, **kwargs):
# Force the template loader to run some SQL. Simulates a CMS.
User.objects.all().count()
return super().get_template(*args, **kwargs)
|
460875
|
from plotly.offline import iplot, _plot_html
from IPython.display import HTML, display
import ipywidgets as widgets
def my_iplot(figure_or_data, show_link=False, link_text='Export to plot.ly',
validate=True, image=None, filename='plot_image', image_width=800,
image_height=600) :
plot_html, plotdivid, width, height = _plot_html(
figure_or_data, show_link, link_text, validate,
'100%', 525, global_requirejs=True)
#display(HTML(plot_html))
wid = widgets.HTML(
value=plot_html,
placeholder='Some HTML',
description='Some HTML',
disabled=False
)
return (wid, plotdivid)
|
460888
|
import FWCore.ParameterSet.Config as cms
DTEffAnalyzer = cms.EDAnalyzer("DTEffAnalyzer",
recHits2DLabel = cms.string('dt2DSegments'),
minHitsSegment = cms.int32(5),
minCloseDist = cms.double(20.0),
recHits4DLabel = cms.string('dt4DSegments'),
rootFileName = cms.untracked.string('DTEffAnalyzer.root'),
debug = cms.untracked.bool(False),
recHits1DLabel = cms.string('dt1DRecHits'),
minChi2NormSegment = cms.double(20.0)
)
|
460943
|
import os
import datetime
class Config:
myemail = os.getenv('MY_EMAIL', '')
user_home_dir = os.path.expanduser('~')
# ml code location
ml_dir = os.path.abspath('.rejected_article_tracker/src/ML')
# data_locations
main_data_dir = os.path.join(user_home_dir,'rejected_article_tracker')
ml_data_dir = os.path.join(main_data_dir,'data')
oai_pmh_dataloc = os.path.join(ml_data_dir,'oai_pmh_data')
crossref_doi_dataloc = os.path.join(ml_data_dir,'cr_doi_results.jsonl')
crossref_search_dataloc = os.path.join(ml_data_dir,'cr_search_results.json')
crossref_search_jsonl_dataloc = os.path.join(ml_data_dir,'cr_search_results.jsonl')
training_dataloc = os.path.join(ml_data_dir,'training_dataframe.csv')
clean_training_dataloc = os.path.join(ml_data_dir,'clean_training_dataframe.csv')
# model locations
# file_dir = os.path.dirname(__file__)
ml_model_dir = os.path.join(user_home_dir,'rejected_article_tracker','models')
old_logreg_model_loc = os.path.join(ml_model_dir, 'lr_model')
new_logreg_model_loc = os.path.join(ml_model_dir, 'lr_model_new')
# training data parameters
# CARE with these.
# the API reader will pull everything UPDATED after the start year
# It will stop when it reaches n_recs_from_oai_pmh
# if the number of docs at that point is < max_training_docs,
# the process will repeat.
start_year_for_training = 2012
current_year = int(datetime.datetime.now().year)
allowed_values = list(range(2007,current_year+1))
if start_year_for_training not in allowed_values:
raise ValueError('Invalid value for start_year_for_training. Must be between 2007 and current year.')
# If using 2012, we get around 80k docs (with ~50% having DOIs)
# So this number is actually double the number of training docs we will use
# We will check the total before processing any data
max_training_docs = 90000
# here 200000 works for 2012, but if you want to try a different year
# you will need to figure out the right number of docs to acquire
# this will be approximately the number of preprints added to arXiv
# since 2007.
n_recs_from_oai_pmh = 200000
predictor_cols = [
'similarity',
'author_match_all',
# NOTE - you can't use score because the
# DOI lookups all default to score ==1.0
# potentially limit data to results where we have a score!=1?
# 'score',
# 'rank',
'n_auths_query'
]
target_col = 'correct_yn'
|
460998
|
from conans import ConanFile, CMake, tools
import os
required_conan_version = ">=1.33.0"
class TidyHtml5Conan(ConanFile):
name = "tidy-html5"
license = "W3C"
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://www.html-tidy.org"
description = "The granddaddy of HTML tools, with support for modern standards"
topics = ("html", "parser", "xml", "tools")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"support_localizations": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"support_localizations": True,
}
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
cmake = CMake(self)
cmake.definitions['BUILD_TAB2SPACE'] = False
cmake.definitions['BUILD_SAMPLE_CODE'] = False
cmake.definitions['TIDY_COMPAT_HEADERS'] = False
cmake.definitions['SUPPORT_CONSOLE_APP'] = False
cmake.definitions['SUPPORT_LOCALIZATIONS'] = self.options.support_localizations
cmake.definitions['ENABLE_DEBUG_LOG'] = False
cmake.definitions['ENABLE_ALLOC_DEBUG'] = False
cmake.definitions['ENABLE_MEMORY_DEBUG'] = False
cmake.definitions['BUILD_SHARED_LIB'] = self.options.shared
cmake.configure(build_folder=self._build_subfolder)
self._cmake = cmake
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
self.copy("LICENSE.md", dst="licenses", src=os.path.join(self._source_subfolder, 'README'))
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "*.pdb")
if self.options.shared:
to_remove = "*tidy_static*" if self.settings.os == "Windows" else "*.a"
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), to_remove)
def package_info(self):
self.cpp_info.names["pkg_config"] = "tidy"
suffix = "_static" if self.settings.os == "Windows" and not self.options.shared else ""
suffix += "d" if self.settings.compiler == "Visual Studio" and self.settings.build_type == "Debug" else ""
self.cpp_info.libs = ["tidy" + suffix]
if self.settings.os == "Windows" and not self.options.shared:
self.cpp_info.defines.append("TIDY_STATIC")
|
461026
|
from cx_const import DefaultActionsMapping, Light
from cx_core import LightController
from cx_core.integration import EventData
class SNZB01LightController(LightController):
def get_z2m_actions_mapping(self) -> DefaultActionsMapping:
return {
"single": Light.TOGGLE, # single click
"double": Light.ON_FULL_BRIGHTNESS, # double click
"long": Light.ON_MIN_BRIGHTNESS, # hold
}
def get_zha_actions_mapping(self) -> DefaultActionsMapping:
return {
"toggle": Light.TOGGLE, # single click
"on": Light.ON_FULL_BRIGHTNESS, # double click
"off": Light.ON_MIN_BRIGHTNESS, # hold
}
def get_zha_action(self, data: EventData) -> str:
command: str = data["command"]
return command
|
461033
|
from flask_restly.decorator import resource, get
from flask_restly import FlaskRestly
from flask import Flask
def test_should_register_resource():
app = Flask(__name__)
FlaskRestly(app)
@resource(name='test')
class SomeResource:
@get('/')
def get(self):
return dict()
with app.app_context():
SomeResource()
with app.test_client() as client:
response = client.get('/api/rest/v1/test')
assert response.status_code == 200
assert response.get_json() == {}
def test_should_register_resources_with_same_method_names():
app = Flask(__name__)
FlaskRestly(app)
@resource(name='test')
class SomeResource:
@get('/')
def get(self):
return dict()
@resource(name='test2')
class SomeResource2:
@get('/')
def get(self):
return dict()
with app.app_context():
SomeResource()
SomeResource2()
with app.test_client() as client:
response = client.get('/api/rest/v1/test')
assert response.status_code == 200
assert response.get_json() == {}
response = client.get('/api/rest/v1/test2')
assert response.status_code == 200
assert response.get_json() == {}
def test_should_register_resource_with_subresource_with_same_method_names():
app = Flask(__name__)
FlaskRestly(app)
@resource(name='parent')
class SomeResource:
@get('/')
def get(self):
return dict()
@resource(name='child', parent=SomeResource)
class SomeResource2:
@get('/')
def get(self, **kwargs):
return dict()
with app.app_context():
SomeResource()
SomeResource2()
with app.test_client() as client:
response = client.get('/api/rest/v1/parent')
assert response.status_code == 200
assert response.get_json() == {}
response = client.get('/api/rest/v1/parent/1/child')
assert response.status_code == 200
assert response.get_json() == {}
def test_should_register_different_api_version_resources_with_same_method_names():
app = Flask(__name__)
FlaskRestly(app)
@resource(name='test')
class SomeResource:
@get('/')
def get(self):
return dict()
@resource(name='test', version=2)
class SomeResource2:
@get('/')
def get(self):
return dict()
with app.app_context():
SomeResource()
SomeResource2()
with app.test_client() as client:
response = client.get('/api/rest/v1/test')
assert response.status_code == 200
assert response.get_json() == {}
response = client.get('/api/rest/v2/test')
assert response.status_code == 200
assert response.get_json() == {}
def test_should_register_subresource():
app = Flask(__name__)
FlaskRestly(app)
@resource(name='parent')
class ParentResource:
@get('/')
def get_parent(self):
return dict()
@resource(name='child', parent=ParentResource)
class ChildResource:
@get('/')
def get_child(self, parent_id):
return dict(parent_id=int(parent_id))
with app.app_context():
ChildResource()
ParentResource()
with app.test_client() as client:
response = client.get('/api/rest/v1/parent/1/child')
assert response.status_code == 200
data = response.get_json()
assert data['parent_id'] == 1
def test_should_register_nested_subresources():
app = Flask(__name__)
FlaskRestly(app)
@resource(name='resource')
class Resource:
@get('/')
def get(self):
return dict()
@resource(name='subresource1', parent=Resource)
class Subresource1:
@get('/')
def get(self, **kwargs):
pass
@resource(name='subresource2', parent=Subresource1)
class Subresource2:
@get('/')
def get(self, **kwargs):
pass
@resource(name='subresource3', parent=Subresource2)
class Subresource3:
@get('/')
def get(self, **kwargs):
pass
@resource(name='subresource4', parent=Subresource3)
class Subresource4:
@get('/')
def get(self, resource_id, subresource1_id, subresource2_id, subresource3_id):
return dict(
resource_id=int(resource_id),
subresource1_id=int(subresource1_id),
subresource2_id=int(subresource2_id),
subresource3_id=int(subresource3_id),
)
with app.app_context():
Resource()
Subresource1()
Subresource2()
Subresource3()
Subresource4()
with app.test_client() as client:
response = client.get('/api/rest/v1/resource/1/subresource1/23/subresource2/45/subresource3/67/subresource4')
assert response.status_code == 200
data = response.get_json()
assert data['resource_id'] == 1
assert data['subresource1_id'] == 23
assert data['subresource2_id'] == 45
assert data['subresource3_id'] == 67
|
461041
|
import pytest
import pandas as pd
from ..importing import attempt_import
def test_import_module():
pandas = attempt_import('pandas', "Pandas package is not installed")
assert pandas is pd
with pytest.raises(RuntimeError) as err:
attempt_import('a_module_which_should_not_exist',
'this module does not exist')
assert(str(err.value) == 'this module does not exist')
|
461080
|
from __future__ import unicode_literals
from ..elements.elementbase import Attribute
from ..tags.context import ContextElementBase
from .. import namespaces
from .. import logic
class Check(ContextElementBase):
"""A pre-flight check"""
xmlns = namespaces.preflight
class Help:
synopsis = "define a pre-flight test"
class Result(ContextElementBase):
xmlns = namespaces.preflight
exit = Attribute("Also exit the check", type="boolean", default=False)
status = None
class Help:
undocumented = True
def logic(self, context):
check = self.get_ancestor((self.xmlns, "check"))
text = context.sub(self.text)
context[".preflight"].append((check, self.status, text))
if self.exit(context):
raise logic.Unwind()
class Pass(Result):
status = "pass"
class Help:
synopsis = "pass a preflight check"
class Fail(Result):
status = "fail"
class Help:
synopsis = "fail a preflight check"
class Warning(Result):
status = "warning"
class Help:
synopsis = "add a warning result to a preflight check"
|
461126
|
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
import cv2
import torch
from torch.utils import data
from torch.nn import functional as F
from torch.autograd import Function
import random
import math
def visualize(img_arr):
plt.imshow(((img_arr.detach().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
plt.axis('off')
def save_image(img, filename):
tmp = ((img.detach().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
cv2.imwrite(filename, cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR))
def load_image(filename):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]),
])
img = Image.open(filename)
img = transform(img)
return img.unsqueeze(dim=0)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
par1 = dict(model1.named_parameters())
par2 = dict(model2.named_parameters())
for k in par1.keys():
par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
def adv_loss(logits, target):
assert target in [1, 0]
targets = torch.full_like(logits, fill_value=target)
loss = F.binary_cross_entropy_with_logits(logits, targets)
return loss
def r1_reg(d_out, x_in):
# zero-centered gradient penalty for real images
batch_size = x_in.size(0)
grad_dout = torch.autograd.grad(
outputs=d_out.sum(), inputs=x_in,
create_graph=True, retain_graph=True, only_inputs=True
)[0]
grad_dout2 = grad_dout.pow(2)
assert(grad_dout2.size() == x_in.size())
reg = 0.5 * grad_dout2.view(batch_size, -1).sum(1).mean(0)
return reg
def moving_average(model, model_test, beta=0.999):
for param, param_test in zip(model.parameters(), model_test.parameters()):
param_test.data = torch.lerp(param.data, param_test.data, beta)
# tensors[NnetD][Nfeatures]2B*C*W*H --> tensors[NnetD][Nfeatures]B*C*W*H, tensors[NnetD][Nfeatures]B*C*W*H
# Take the prediction of fake and real images from the combined batch
def divide_pred(pred):
# the prediction contains the intermediate outputs of multiscale GAN,
# so it's usually a list
if type(pred) == list:
fake = []
real = []
for p in pred:
fake.append([tensor[:tensor.size(0) // 2] for tensor in p])
real.append([tensor[tensor.size(0) // 2:] for tensor in p])
else:
fake = pred[:pred.size(0) // 2]
real = pred[pred.size(0) // 2:]
return fake, real
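# --- Illustrative usage sketch (not part of the original module) ---
# divide_pred() assumes the discriminator ran on a batch built as
# torch.cat([fake, real], dim=0), so the first half of every output tensor
# belongs to fake images and the second half to real ones.
if __name__ == '__main__':
    combined = torch.randn(8, 1, 4, 4)           # 4 fake + 4 real predictions
    fake_pred, real_pred = divide_pred(combined)
    assert fake_pred.shape[0] == real_pred.shape[0] == 4
    # multiscale output: a list of per-discriminator feature lists also works
    multi = [[torch.randn(8, 16), torch.randn(8, 32)]]
    fake_feats, real_feats = divide_pred(multi)
    assert fake_feats[0][0].shape[0] == 4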
|
461140
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim
import torch.nn.functional as F
import torch.optim.lr_scheduler as lr_scheduler
import time
import os
import glob
from itertools import combinations
import configs
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
from utils import *
import backbone
from datasets import ISIC_few_shot, EuroSAT_few_shot, CropDisease_few_shot, Chest_few_shot
class Classifier(nn.Module):
def __init__(self, dim, n_way):
super(Classifier, self).__init__()
self.fc = nn.Linear(dim, n_way)
def forward(self, x):
x = self.fc(x)
return x
def train_loss_cross_validation(embeddings, y_a_i, support_size, n_support, n_way, total_epoch):
embeddings = Variable(embeddings).cuda()
all_losses = []
for r in range(n_support):
train_embeddings = []
val_embeddings = []
train_y = []
val_y = []
for idx in range(embeddings.size()[0]):
if (idx - r) % n_support == 0:
val_embeddings.append(embeddings[idx, :].view(1, embeddings[idx, :].size()[0]))
val_y.append(y_a_i[idx])
else:
train_embeddings.append(embeddings[idx, :].view(1, embeddings[idx, :].size()[0]))
train_y.append(y_a_i[idx])
train_y = np.asarray(train_y)
val_y = np.asarray(val_y)
val_embeddings = torch.cat(val_embeddings, 0)
train_embeddings = torch.cat(train_embeddings, 0)
loss_fn = nn.CrossEntropyLoss().cuda()
net = Classifier(train_embeddings.size()[1], n_way).cuda()
classifier_opt = torch.optim.SGD(net.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
train_y = Variable(torch.from_numpy(train_y)).cuda()
train_size = support_size - n_support
batch_size = 4
for epoch in range(total_epoch):
rand_id = np.random.permutation(train_size)
for j in range(0, train_size, batch_size):
classifier_opt.zero_grad()
#####################################
selected_id = torch.from_numpy( rand_id[j: min(j+batch_size, train_size)]).cuda()
z_batch = train_embeddings[selected_id]
y_batch = train_y[selected_id]
#####################################
outputs = net(z_batch)
#####################################
loss = loss_fn(outputs, y_batch)
loss.backward()
classifier_opt.step()
val_y = Variable(torch.from_numpy(val_y)).cuda() # (25,)
outputs = net(val_embeddings)
loss = loss_fn(outputs, val_y)
all_losses.append(loss)
return sum(all_losses) / (len(all_losses) + 0.0)
def train_loss_half_validation(embeddings, y_a_i, support_size, n_support, n_way, total_epoch):
embeddings = embeddings.cpu().numpy()
train_embeddings = []
val_embeddings = []
train_y = []
val_y = []
for idx in range(support_size):
if (idx % 10) % 2 == 0:
val_embeddings.append(embeddings[idx, :].reshape(1, embeddings[idx, :].shape[0]))
val_y.append(y_a_i[idx])
else:
train_embeddings.append(embeddings[idx, :].reshape(1, embeddings[idx, :].shape[0]))
train_y.append(y_a_i[idx])
train_y = np.asarray(train_y)
val_y = np.asarray(val_y)
val_embeddings = torch.from_numpy(np.concatenate( val_embeddings, axis=0 ))
train_embeddings = torch.from_numpy(np.concatenate( train_embeddings, axis=0 ))
loss_fn = nn.CrossEntropyLoss().cuda()
net = Classifier(train_embeddings.size()[1], n_way).cuda()
classifier_opt = torch.optim.SGD(net.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
train_y = Variable(torch.from_numpy(train_y)).cuda()
train_embeddings = Variable(train_embeddings).cuda()
    train_size = support_size // 2
batch_size = 4
for epoch in range(total_epoch):
rand_id = np.random.permutation(train_size)
for j in range(0, train_size, batch_size):
classifier_opt.zero_grad()
#####################################
selected_id = torch.from_numpy( rand_id[j: min(j+batch_size, train_size)]).cuda()
z_batch = train_embeddings[selected_id]
y_batch = train_y[selected_id]
#####################################
outputs = net(z_batch)
#####################################
loss = loss_fn(outputs, y_batch)
loss.backward()
classifier_opt.step()
val_embeddings = Variable(val_embeddings).cuda()
val_y = Variable(torch.from_numpy(val_y)).cuda()
outputs = net(val_embeddings)
loss = loss_fn(outputs, val_y)
return loss
def combine_model(model_embeddings, y_a_i, support_size, n_support, n_way, cross_validation_epoch, with_replacement=True):
embeddings_idx_model = []
embeddings_all = None
min_loss = float("inf")
for num in range(len(model_embeddings)):
embedding_candidate = None
idx_candidate = -1
for idx, embedding in enumerate(model_embeddings):
if embeddings_all is None:
                if n_support in (20, 50):
                    running_loss = train_loss_half_validation(embedding, y_a_i, support_size, n_support, n_way, cross_validation_epoch)
                else:
                    running_loss = train_loss_cross_validation(embedding, y_a_i, support_size, n_support, n_way, cross_validation_epoch)
else:
tmp_embedding = torch.cat((embeddings_all, embedding), 1)
                if n_support in (20, 50):
                    running_loss = train_loss_half_validation(tmp_embedding, y_a_i, support_size, n_support, n_way, cross_validation_epoch)
                else:
                    running_loss = train_loss_cross_validation(tmp_embedding, y_a_i, support_size, n_support, n_way, cross_validation_epoch)
if running_loss < min_loss:
embedding_candidate = embedding
idx_candidate = idx
min_loss = running_loss
if with_replacement:
if idx_candidate != -1:
embeddings_idx_model.append(idx_candidate)
if embeddings_all is None:
embeddings_all = embedding_candidate
else:
embeddings_all = torch.cat((embeddings_all, embedding_candidate), 1)
else:
if idx_candidate not in embeddings_idx_model and idx_candidate != -1:
embeddings_idx_model.append(idx_candidate)
if embeddings_all is None:
embeddings_all = embedding_candidate
else:
embeddings_all = torch.cat((embeddings_all, embedding_candidate), 1)
return embeddings_idx_model, embeddings_all
def train_selection(all_embeddings, y_a_i, support_size, n_support, n_way, with_replacement=False):
embeddings_idx = []
cross_validation_epoch = 20
embeddings_best_of_each = []
embeddings_idx_of_each = []
for num in range(len(all_embeddings)):
embedding_candidate = None
idx_candidate = -1
min_loss = float("inf")
for idx, embedding in enumerate(all_embeddings[num]):
            if n_support == 50 or n_support == 20:
                running_loss = train_loss_half_validation(embedding, y_a_i, support_size, n_support, n_way, cross_validation_epoch)
            else:
                running_loss = train_loss_cross_validation(embedding, y_a_i, support_size, n_support, n_way, cross_validation_epoch)
if running_loss < min_loss:
embedding_candidate = embedding
idx_candidate = idx
min_loss = running_loss
embeddings_idx_of_each.append(idx_candidate)
embeddings_best_of_each.append(embedding_candidate)
embeddings_idx_model, embeddings_all = combine_model(embeddings_best_of_each, y_a_i, support_size, n_support, n_way, cross_validation_epoch, with_replacement=with_replacement)
return embeddings_idx_of_each, embeddings_idx_model, embeddings_all, embeddings_best_of_each
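# Note added for clarity (reading of the code above, not original documentation):
# train_selection first picks, for every pretrained model, the single layer
# embedding with the lowest held-out loss; combine_model then greedily
# concatenates those per-model embeddings, accepting a candidate only when the
# concatenation further lowers the held-out loss.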
def test_loop(novel_loader, return_std = False, loss_type="softmax", n_query = 15, models_to_use=[], finetune_each_model = False, n_way = 5, n_support = 5): #overwrite parrent function
correct = 0
count = 0
iter_num = len(novel_loader)
acc_all = []
for _, (x, y) in enumerate(novel_loader):
###############################################################################################
pretrained_models = []
for _ in range(len(models_to_use)):
pretrained_models.append(model_dict[params.model]())
###############################################################################################
for idx, dataset_name in enumerate(models_to_use):
checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, models_to_use[idx], params.model, params.method)
if params.train_aug:
checkpoint_dir += '_aug'
params.save_iter = -1
if params.save_iter != -1:
modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
elif params.method in ['baseline', 'baseline++'] :
modelfile = get_resume_file(checkpoint_dir)
else:
modelfile = get_best_file(checkpoint_dir)
tmp = torch.load(modelfile)
state = tmp['state']
state_keys = list(state.keys())
for _, key in enumerate(state_keys):
if "feature." in key:
newkey = key.replace("feature.","") # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
state[newkey] = state.pop(key)
else:
state.pop(key)
pretrained_models[idx].load_state_dict(state)
###############################################################################################
n_query = x.size(1) - n_support
x = x.cuda()
x_var = Variable(x)
batch_size = 4
support_size = n_way * n_support
##################################################################################
if finetune_each_model:
for idx, model_name in enumerate(pretrained_models):
pretrained_models[idx].cuda()
pretrained_models[idx].train()
x_a_i = x_var[:,:n_support,:,:,:].contiguous().view( n_way* n_support, *x.size()[2:]) # (25, 3, 224, 224)
loss_fn = nn.CrossEntropyLoss().cuda()
cnet = Classifier(pretrained_models[idx].final_feat_dim, n_way).cuda()
classifier_opt = torch.optim.SGD(cnet.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
feature_opt = torch.optim.SGD(pretrained_models[idx].parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
x_a_i = Variable(x_a_i).cuda()
y_a_i = Variable( torch.from_numpy( np.repeat(range( n_way ), n_support ) )).cuda() # (25,)
train_size = support_size
batch_size = 4
for epoch in range(100):
rand_id = np.random.permutation(train_size)
for j in range(0, train_size, batch_size):
classifier_opt.zero_grad()
feature_opt.zero_grad()
#####################################
selected_id = torch.from_numpy( rand_id[j: min(j+batch_size, train_size)]).cuda()
z_batch = x_a_i[selected_id]
y_batch = y_a_i[selected_id]
#####################################
outputs = pretrained_models[idx](z_batch)
outputs = cnet(outputs)
#####################################
loss = loss_fn(outputs, y_batch)
loss.backward()
classifier_opt.step()
feature_opt.step()
###############################################################################################
for idx, model_name in enumerate(pretrained_models):
pretrained_models[idx].cuda()
pretrained_models[idx].eval()
###############################################################################################
all_embeddings_train = []
for idx, model_name in enumerate(pretrained_models):
model_embeddings = []
x_a_i = x_var[:,:n_support,:,:,:].contiguous().view( n_way* n_support, *x.size()[2:]) # (25, 3, 224, 224)
            for module in pretrained_models[idx].trunk:
x_a_i = module(x_a_i)
if len(list(x_a_i.size())) == 4:
embedding = F.adaptive_avg_pool2d(x_a_i, (1, 1)).squeeze()
model_embeddings.append(embedding.detach())
if params.model == "ResNet10" or params.model == "ResNet18":
model_embeddings = model_embeddings[4:-1]
elif params.model == "Conv4":
model_embeddings = model_embeddings
all_embeddings_train.append(model_embeddings)
##########################################################
y_a_i = np.repeat(range( n_way ), n_support )
embeddings_idx_of_each, embeddings_idx_model, embeddings_train, embeddings_best_of_each = train_selection(all_embeddings_train, y_a_i, support_size, n_support, n_way, with_replacement=True)
##########################################################
all_embeddings_test = []
for idx, model_name in enumerate(pretrained_models):
model_embeddings = []
x_b_i = x_var[:, n_support:,:,:,:].contiguous().view( n_way* n_query, *x.size()[2:])
            for module in pretrained_models[idx].trunk:
x_b_i = module(x_b_i)
if len(list(x_b_i.size())) == 4:
embedding = F.adaptive_avg_pool2d(x_b_i, (1, 1)).squeeze()
                model_embeddings.append(embedding.detach())
if params.model == "ResNet10" or params.model == "ResNet18":
model_embeddings = model_embeddings[4:-1]
elif params.model == "Conv4":
model_embeddings = model_embeddings
all_embeddings_test.append(model_embeddings)
############################################################################################
embeddings_test = []
for index in embeddings_idx_model:
embeddings_test.append(all_embeddings_test[index][embeddings_idx_of_each[index]])
embeddings_test = torch.cat(embeddings_test, 1)
############################################################################################
y_a_i = Variable( torch.from_numpy( np.repeat(range( n_way ), n_support ) )).cuda() # (25,)
net = Classifier(embeddings_test.size()[1], n_way).cuda()
loss_fn = nn.CrossEntropyLoss().cuda()
classifier_opt = torch.optim.SGD(net.parameters(), lr = 0.01, momentum=0.9, dampening=0.9, weight_decay=0.001)
total_epoch = 100
embeddings_train = Variable(embeddings_train.cuda())
net.train()
for epoch in range(total_epoch):
rand_id = np.random.permutation(support_size)
for j in range(0, support_size, batch_size):
classifier_opt.zero_grad()
#####################################
selected_id = torch.from_numpy( rand_id[j: min(j+batch_size, support_size)]).cuda()
z_batch = embeddings_train[selected_id]
y_batch = y_a_i[selected_id]
#####################################
outputs = net(z_batch)
#####################################
loss = loss_fn(outputs, y_batch)
loss.backward()
classifier_opt.step()
embeddings_test = Variable(embeddings_test.cuda())
scores = net(embeddings_test)
y_query = np.repeat(range( n_way ), n_query )
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:,0] == y_query)
correct_this, count_this = float(top1_correct), len(y_query)
print (correct_this/ count_this *100)
acc_all.append((correct_this/ count_this *100))
###############################################################################################
acc_all = np.asarray(acc_all)
acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
if __name__=='__main__':
np.random.seed(10)
params = parse_args('train')
##################################################################
image_size = 224
iter_num = 600
n_query = max(1, int(16* params.test_n_way/params.train_n_way)) #if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot)
models_to_use = params.models_to_use
finetune_each_model = params.fine_tune_all_models
##################################################################
dataset_names = ["ISIC", "EuroSAT", "CropDisease", "Chest"]
novel_loaders = []
datamgr = ISIC_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
novel_loaders.append(novel_loader)
datamgr = EuroSAT_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
novel_loaders.append(novel_loader)
datamgr = CropDisease_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
novel_loaders.append(novel_loader)
datamgr = Chest_few_shot.SetDataManager(image_size, n_eposide = iter_num, n_query = 15, **few_shot_params)
novel_loader = datamgr.get_data_loader(aug =False)
novel_loaders.append(novel_loader)
#########################################################################
for idx, novel_loader in enumerate(novel_loaders):
        print(dataset_names[idx])
start_epoch = params.start_epoch
stop_epoch = params.stop_epoch
test_loop(novel_loader, return_std = False, n_query = 15, models_to_use=models_to_use, finetune_each_model = finetune_each_model, **few_shot_params)
|
461161
|
import os
import shapely
from affine import Affine
import rasterio
from rasterio.warp import transform_bounds
from ..utils.geo import list_to_affine, _reduce_geom_precision
from ..utils.core import _check_gdf_load
from ..raster_image.image import get_geo_transform
from shapely.geometry import box, Polygon
import pandas as pd
import geopandas as gpd
from rtree.core import RTreeError
def convert_poly_coords(geom, raster_src=None, affine_obj=None, inverse=False,
precision=None):
"""Georegister geometry objects currently in pixel coords or vice versa.
Arguments
---------
geom : :class:`shapely.geometry.shape` or str
A :class:`shapely.geometry.shape`, or WKT string-formatted geometry
object currently in pixel coordinates.
raster_src : str, optional
Path to a raster image with georeferencing data to apply to `geom`.
Alternatively, an opened :class:`rasterio.Band` object or
:class:`osgeo.gdal.Dataset` object can be provided. Required if not
using `affine_obj`.
affine_obj: list or :class:`affine.Affine`
An affine transformation to apply to `geom` in the form of an
``[a, b, d, e, xoff, yoff]`` list or an :class:`affine.Affine` object.
Required if not using `raster_src`.
inverse : bool, optional
If true, will perform the inverse affine transformation, going from
geospatial coordinates to pixel coordinates.
precision : int, optional
Decimal precision for the polygon output. If not provided, rounding
is skipped.
Returns
-------
out_geom
A geometry in the same format as the input with its coordinate system
transformed to match the destination object.
"""
if not raster_src and not affine_obj:
raise ValueError("Either raster_src or affine_obj must be provided.")
if raster_src is not None:
affine_xform = get_geo_transform(raster_src)
else:
if isinstance(affine_obj, Affine):
affine_xform = affine_obj
else:
# assume it's a list in either gdal or "standard" order
# (list_to_affine checks which it is)
if len(affine_obj) == 9: # if it's straight from rasterio
affine_obj = affine_obj[0:6]
affine_xform = list_to_affine(affine_obj)
if inverse: # geo->px transform
affine_xform = ~affine_xform
if isinstance(geom, str):
# get the polygon out of the wkt string
g = shapely.wkt.loads(geom)
elif isinstance(geom, shapely.geometry.base.BaseGeometry):
g = geom
else:
raise TypeError('The provided geometry is not an accepted format. ' +
'This function can only accept WKT strings and ' +
'shapely geometries.')
xformed_g = shapely.affinity.affine_transform(g, [affine_xform.a,
affine_xform.b,
affine_xform.d,
affine_xform.e,
affine_xform.xoff,
affine_xform.yoff])
if isinstance(geom, str):
# restore to wkt string format
xformed_g = shapely.wkt.dumps(xformed_g)
if precision is not None:
xformed_g = _reduce_geom_precision(xformed_g, precision=precision)
return xformed_g
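# Illustrative usage sketch (not part of the original module). The affine values
# below are made-up assumptions chosen only to demonstrate the call signature:
# a 0.5-unit pixel size with an arbitrary origin. The WKT polygon is in pixel
# coordinates and is georegistered by the forward transform.
# >>> from affine import Affine
# >>> aff = Affine(0.5, 0.0, 733601.0, 0.0, -0.5, 3725139.0)
# >>> geo_wkt = convert_poly_coords('POLYGON ((0 0, 10 0, 10 10, 0 10, 0 0))',
# ...                               affine_obj=aff)  # WKT string in geo coords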
def affine_transform_gdf(gdf, affine_obj, inverse=False, geom_col="geometry",
precision=None):
"""Perform an affine transformation on a GeoDataFrame.
Arguments
---------
gdf : :class:`geopandas.GeoDataFrame`, :class:`pandas.DataFrame`, or `str`
A GeoDataFrame, pandas DataFrame with a ``"geometry"`` column (or a
different column containing geometries, identified by `geom_col` -
note that this column will be renamed ``"geometry"`` for ease of use
with geopandas), or the path to a saved file in .geojson or .csv
format.
affine_obj : list or :class:`affine.Affine`
An affine transformation to apply to `geom` in the form of an
``[a, b, d, e, xoff, yoff]`` list or an :class:`affine.Affine` object.
inverse : bool, optional
Use this argument to perform the inverse transformation.
geom_col : str, optional
The column in `gdf` corresponding to the geometry. Defaults to
``'geometry'``.
precision : int, optional
Decimal precision to round the geometries to. If not provided, no
rounding is performed.
"""
if isinstance(gdf, str): # assume it's a geojson
if gdf.lower().endswith('json'):
gdf = gpd.read_file(gdf)
elif gdf.lower().endswith('csv'):
gdf = pd.read_csv(gdf)
gdf = gdf.rename(columns={geom_col: 'geometry'})
if not isinstance(gdf['geometry'][0], Polygon):
gdf['geometry'] = gdf['geometry'].apply(shapely.wkt.loads)
else:
raise ValueError(
"The file format is incompatible with this function.")
gdf["geometry"] = gdf["geometry"].apply(convert_poly_coords,
affine_obj=affine_obj,
inverse=inverse)
if precision is not None:
gdf['geometry'] = gdf['geometry'].apply(
_reduce_geom_precision, precision=precision)
return gdf
def georegister_px_df(df, im_fname=None, affine_obj=None, crs=None,
geom_col='geometry', precision=None):
"""Convert a dataframe of geometries in pixel coordinates to a geo CRS.
Arguments
---------
df : :class:`pandas.DataFrame`
A :class:`pandas.DataFrame` with polygons in a column named
``"geometry"``.
im_fname : str, optional
A filename or :class:`rasterio.DatasetReader` object containing an
image that has the same bounds as the pixel coordinates in `df`. If
not provided, `affine_obj` and `crs` must both be provided.
affine_obj : `list` or :class:`affine.Affine`, optional
An affine transformation to apply to `geom` in the form of an
``[a, b, d, e, xoff, yoff]`` list or an :class:`affine.Affine` object.
Required if not using `raster_src`.
crs : dict, optional
The coordinate reference system for the output GeoDataFrame. Required
if not providing a raster image to extract the information from. Format
        should be ``{'init': 'epsg:xxxx'}``, replacing xxxx with the EPSG code.
geom_col : str, optional
The column containing geometry in `df`. If not provided, defaults to
``"geometry"``.
precision : int, optional
The decimal precision for output geometries. If not provided, the
vertex locations won't be rounded.
"""
if im_fname is not None:
affine_obj = rasterio.open(im_fname).transform
crs = rasterio.open(im_fname).crs
else:
if not affine_obj or not crs:
raise ValueError(
'If an image path is not provided, ' +
'affine_obj and crs must be.')
tmp_df = affine_transform_gdf(df, affine_obj, geom_col=geom_col,
precision=precision)
return gpd.GeoDataFrame(tmp_df, crs=crs)
def geojson_to_px_gdf(geojson, im_path, precision=None):
"""Convert a geojson or set of geojsons from geo coords to px coords.
Arguments
---------
geojson : str
Path to a geojson. This function will also accept a
:class:`pandas.DataFrame` or :class:`geopandas.GeoDataFrame` with a
column named ``'geometry'`` in this argument.
im_path : str
Path to a georeferenced image (ie a GeoTIFF) that geolocates to the
same geography as the `geojson`(s). If a directory, the bounds of each
GeoTIFF will be loaded in and all overlapping geometries will be
transformed. This function will also accept a
:class:`osgeo.gdal.Dataset` or :class:`rasterio.DatasetReader` with
georeferencing information in this argument.
precision : int, optional
The decimal precision for output geometries. If not provided, the
vertex locations won't be rounded.
Returns
-------
output_df : :class:`pandas.DataFrame`
A :class:`pandas.DataFrame` with all geometries in `geojson` that
overlapped with the image at `im_path` converted to pixel coordinates.
Additional columns are included with the filename of the source
geojson (if available) and images for reference.
"""
# get the bbox and affine transforms for the image
if isinstance(im_path, str):
bbox = box(*rasterio.open(im_path).bounds)
affine_obj = rasterio.open(im_path).transform
im_crs = rasterio.open(im_path).crs
else:
        bbox = box(*im_path.bounds)
affine_obj = im_path.transform
im_crs = im_path.crs
# make sure the geo vector data is loaded in as geodataframe(s)
gdf = _check_gdf_load(geojson)
overlap_gdf = get_overlapping_subset(gdf, bbox=bbox, bbox_crs=im_crs)
transformed_gdf = affine_transform_gdf(overlap_gdf, affine_obj=affine_obj,
inverse=True, precision=precision)
transformed_gdf['image_fname'] = os.path.split(im_path)[1]
return transformed_gdf
def get_overlapping_subset(gdf, im=None, bbox=None, bbox_crs=None):
"""Extract a subset of geometries in a GeoDataFrame that overlap with `im`.
Notes
-----
This function uses RTree's spatialindex, which is much faster (but slightly
less accurate) than direct comparison of each object for overlap.
Arguments
---------
gdf : :class:`geopandas.GeoDataFrame`
A :class:`geopandas.GeoDataFrame` instance or a path to a geojson.
im : :class:`rasterio.DatasetReader` or `str`, optional
An image object loaded with `rasterio` or a path to a georeferenced
image (i.e. a GeoTIFF).
bbox : `list` or :class:`shapely.geometry.Polygon`, optional
A bounding box (either a :class:`shapely.geometry.Polygon` or a
        ``[left, bottom, right, top]`` `list`) from an image. Has no effect
if `im` is provided (`bbox` is inferred from the image instead.) If
`bbox` is passed and `im` is not, a `bbox_crs` should be provided to
ensure correct geolocation - if it isn't, it will be assumed to have
the same crs as `gdf`.
Returns
-------
output_gdf : :class:`geopandas.GeoDataFrame`
A :class:`geopandas.GeoDataFrame` with all geometries in `gdf` that
overlapped with the image at `im`.
Coordinates are kept in the CRS of `gdf`.
"""
if not im and not bbox:
raise ValueError('Either `im` or `bbox` must be provided.')
if isinstance(gdf, str):
gdf = gpd.read_file(gdf)
if isinstance(im, str):
im = rasterio.open(im)
sindex = gdf.sindex
# use transform_bounds in case the crs is different - no effect if not
if im:
bbox = transform_bounds(im.crs, gdf.crs, *im.bounds)
else:
if isinstance(bbox, Polygon):
bbox = bbox.bounds
if not bbox_crs:
bbox_crs = gdf.crs
bbox = transform_bounds(bbox_crs, gdf.crs, *bbox)
try:
intersectors = list(sindex.intersection(bbox))
except RTreeError:
intersectors = []
return gdf.iloc[intersectors, :]
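# Illustrative usage sketch (hypothetical file names, not from the original
# module): clip a label GeoJSON to an image's footprint, then convert the
# surviving geometries to pixel coordinates.
# >>> subset = get_overlapping_subset('labels.geojson', im='image.tif')
# >>> px_gdf = geojson_to_px_gdf('labels.geojson', 'image.tif', precision=2)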
|
461166
|
import sys
import os
sys.path.append("/tensorflow/models/research")
from object_detection.utils import config_util
from config_file_module.ssd_resnet_50_fpn import config_ssd_resnet_fpn
from config_file_module.ssd_mobilenet_inception import config_ssd_mobilenet_inception
from config_file_module.frcnn_resnet_50_101 import config_frcnn_resnet_50_101
"""
modifies the config file of an architecture based on user input
Params
------
input_path: str
path of the original config file
config_params: dict
dict containing all user defined params
network_type: str
type of the network.
"""
def create_config_file(input_path, config_params, network_type):
configs = config_util.get_configs_from_pipeline_file(input_path)
if config_params['checkpoint_path'] is not None:
prefix = ""
for ckpt_file in os.listdir(os.path.join('/checkpoints/'+ network_type, config_params['checkpoint_path'])):
if ckpt_file.endswith(".index"):
prefix = ckpt_file.split(".index")[0]
config_params['checkpoint_path'] = '/checkpoints/'+network_type+'/'+config_params['checkpoint_path']+'/'+prefix
else:
config_params['checkpoint_path'] = '/weights/'+network_type+'/model.ckpt'
new_configs = None
if network_type == "ssd_mobilenet" or network_type == "ssd_inception":
new_configs = config_ssd_mobilenet_inception(configs, config_params)
elif network_type == "ssd_resnet_50" or network_type == "ssd_fpn":
new_configs = config_ssd_mobilenet_inception(configs, config_params)
elif network_type == "frcnn_resnet_50" or network_type == "frcnn_resnet_101":
new_configs = config_frcnn_resnet_50_101(configs, config_params)
pipeline_config = config_util.create_pipeline_proto_from_configs(new_configs)
config_util.save_pipeline_config(pipeline_config, '/training_dir/model')
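# Illustrative usage sketch (assumption: only 'checkpoint_path' is read directly
# in this file; any other keys a user passes are interpreted by the per-network
# config_* helpers, and the input path shown here is hypothetical).
# >>> params = {'checkpoint_path': None}
# >>> create_config_file('/weights/ssd_mobilenet/pipeline.config', params,
# ...                    'ssd_mobilenet')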
|
461199
|
from app.server import server
from app.notifications.webapn import supports_web_apn, create_pushpackage_zip
from app.controllers import webapn
from app.helpers.render import render_json
from app.models.APNDevice import APNDevice, APNProvider
from app.session.csrf import csrf_protected
from app.models.User import User
from config import notifications
from flask import abort, request, session, g, send_file
from json import dumps as json_dumps
import bugsnag
"""
Safari Push Notification routes.
Because Apple likes to be special we have to make
seperate things to integrate with Web APN. Very important
to have caching enabled if doing APN because otherwise
this will be re-preparing the APN every time.
"""
@server.route("/webapn/get_identification", methods=['POST'])
@csrf_protected
def webapn_get_identification():
if not isinstance(g.user, User):
return abort(401)
# Generate a short-term expiring token
authorization_token = webapn.generate_temporary_id()
return render_json({'token': authorization_token})
@server.route("/webapn/v<int:version>/pushPackages/<web_apn_id>", methods=['POST'])
def webapn_get_push_package(version, web_apn_id):
if not supports_web_apn(web_apn_id) or not webapn.is_valid_webapn_version(version):
return abort(404)
# Validate authorization token (associated with user)
json = request.get_json(silent=True)
authorization_token = json.get('token', None)
if authorization_token is None:
return abort(401)
# Get the user behind the temporary token
user = webapn.get_temporary_id_user(authorization_token)
if not isinstance(user, User):
return abort(403)
# Now we create a 'device' this represents PNs for one device
# this includes an 'auth token' which is a secure association
# between the device and the authorized user
device = webapn.add_apn_device(user=user, provider=APNProvider.WEB_APN)
# Create the pushpackage with all this data
pushpackage = create_pushpackage_zip(device=device)
return send_file(pushpackage, attachment_filename='Axtell.pushpackage', as_attachment=True)
@server.route("/webapn/v<int:version>/devices/<device_token>/registrations/<web_apn_id>", methods=['POST'])
def webapn_add_registration(version, device_token, web_apn_id):
if not supports_web_apn(web_apn_id) or not webapn.is_valid_webapn_version(version):
return abort(404)
authorization_header = request.headers.get('Authorization', None)
if authorization_header is None:
return abort(401)
authorization_header = authorization_header.strip()
if not authorization_header.startswith('ApplePushNotifications'):
return abort(400)
authorization_token = authorization_header[len('ApplePushNotifications '):]
# 36 is the length of the UUID
if len(authorization_token) != 36:
return abort(400)
device = webapn.\
set_apn_device(
authorization_token=authorization_token,
provider=APNProvider.WEB_APN,
device_token=device_token
)
if not isinstance(device, APNDevice):
return abort(403)
return ('OK', 200)
@server.route("/webapn/v<int:version>/devices/<device_token>/registrations/<web_apn_id>", methods=['DELETE'])
def webapn_delete_registration(version, device_token, web_apn_id):
if not supports_web_apn(web_apn_id) or not webapn.is_valid_webapn_version(version):
return abort(404)
authorization_header = request.headers.get('Authorization', None)
if authorization_header is None:
return abort(401)
authorization_header = authorization_header.strip()
if not authorization_header.startswith('ApplePushNotifications'):
return abort(400)
authorization_token = authorization_header[len('ApplePushNotifications '):]
# 36 is the length of the UUID
if len(authorization_token) != 36:
return abort(400)
did_delete = webapn.\
delete_apn_device(
authorization_token=authorization_token,
provider=APNProvider.WEB_APN
)
if not did_delete:
return abort(400)
return ('OK', 200)
@server.route("/webapn/v<int:version>/log", methods=['GET', 'POST'])
def webapn_log(version):
if not webapn.is_valid_webapn_version(version):
return abort(404)
json = request.get_json(silent=True)
if json is None:
bugsnag.notify(
Exception("WebAPN exception"),
meta_data={"webapn_logs": {f"data": request.data}}
)
return ('', 204)
logs = json["logs"]
if server.debug:
print(json_dumps(logs))
if bugsnag.configuration.api_key is not None:
bugsnag.notify(
Exception("WebAPN exception"),
meta_data={"webapn_logs": {f"Log {i}": log for i, log in enumerate(logs)}}
)
return ('', 204)
|
461219
|
import numpy as np
def create_obstacles(sim_time, num_timesteps):
# Obstacle 1
v = -2
p0 = np.array([5, 12])
obst = create_robot(p0, v, np.pi/2, sim_time,
num_timesteps).reshape(4, num_timesteps, 1)
obstacles = obst
# Obstacle 2
v = 2
p0 = np.array([0, 5])
obst = create_robot(p0, v, 0, sim_time, num_timesteps).reshape(
4, num_timesteps, 1)
obstacles = np.dstack((obstacles, obst))
# Obstacle 3
v = 2
p0 = np.array([10, 10])
obst = create_robot(p0, v, -np.pi * 3 / 4, sim_time, num_timesteps).reshape(4,
num_timesteps, 1)
obstacles = np.dstack((obstacles, obst))
# Obstacle 4
v = 2
p0 = np.array([7.5, 2.5])
obst = create_robot(p0, v, np.pi * 3 / 4, sim_time, num_timesteps).reshape(4,
num_timesteps, 1)
obstacles = np.dstack((obstacles, obst))
return obstacles
def create_robot(p0, v, theta, sim_time, num_timesteps):
# Creates obstacles starting at p0 and moving at v in theta direction
t = np.linspace(0, sim_time, num_timesteps)
theta = theta * np.ones(np.shape(t))
vx = v * np.cos(theta)
vy = v * np.sin(theta)
v = np.stack([vx, vy])
p0 = p0.reshape((2, 1))
p = p0 + np.cumsum(v, axis=1) * (sim_time / num_timesteps)
p = np.concatenate((p, v))
return p
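# Illustrative usage sketch: each obstacle trajectory is a (4, num_timesteps)
# stack of [x, y, vx, vy], and create_obstacles stacks the four obstacles along
# the last axis. The simulation length and step count below are assumptions.
# >>> obstacles = create_obstacles(sim_time=8.0, num_timesteps=80)
# >>> obstacles.shape
# (4, 80, 4)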
|
461252
|
from bson import ObjectId
from django.db.models import Model
from django.db.models.base import ModelState
from mongoengine import document as me
from mongoengine.base import metaclasses as mtc
from mongoengine.errors import FieldDoesNotExist
from .forms.document_options import DocumentMetaWrapper
from .queryset import QuerySetManager
def django_meta(meta, *top_bases):
class metaclass(meta):
def __new__(cls, name, bases, attrs):
change_bases = len(bases) == 1 and (bases[0].__name__ == "temporary_meta")
if change_bases:
new_bases = top_bases
else:
new_bases = ()
for b in bases:
if getattr(b, 'swap_base', False):
new_bases += top_bases
else:
new_bases += (b,)
new_cls = meta.__new__(cls, name, new_bases, attrs)
new_cls._meta = DocumentMetaWrapper(new_cls)
return new_cls
return type.__new__(metaclass, 'temporary_meta', (), {})
class DjangoFlavor(object):
objects = QuerySetManager()
_default_manager = QuerySetManager()
_get_pk_val = Model.__dict__["_get_pk_val"]
def __init__(self, *args, **kwargs):
self._state = ModelState()
self._state.db = self._meta.get("db_alias", me.DEFAULT_CONNECTION_NAME)
super(DjangoFlavor, self).__init__(*args, **kwargs)
def _get_unique_checks(self, exclude=None):
# XXX: source: django/db/models/base.py
# used in modelform validation
unique_checks, date_checks = [], []
return unique_checks, date_checks
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
value = field.to_mongo(getattr(self, field.name))
if isinstance(value, ObjectId):
return str(value)
return value
class Document(
django_meta(
mtc.TopLevelDocumentMetaclass,
DjangoFlavor,
me.Document,
)
):
swap_base = True
class DynamicDocument(
django_meta(mtc.TopLevelDocumentMetaclass, DjangoFlavor, me.DynamicDocument)
):
swap_base = True
class EmbeddedDocument(
django_meta(mtc.DocumentMetaclass, DjangoFlavor, me.EmbeddedDocument)
):
swap_base = True
class DynamicEmbeddedDocument(
django_meta(mtc.DocumentMetaclass, DjangoFlavor, me.DynamicEmbeddedDocument)
):
swap_base = True
|
461272
|
import datetime
import glob
import json
import os
import pathlib
import re
from typing import Any, Dict, List, Optional
from appdirs import AppDirs # type: ignore
import requests
import requests_cache # type: ignore
from tarsafe import TarSafe # type: ignore
from ochrona.log import OchronaLogger
# Cache settings
expire_after = datetime.timedelta(hours=1)
requests_cache.install_cache("db_cache", expire_after=expire_after)
RELEASES_URL = (
"https://api.github.com/repos/ochronasec/ochrona_python_vulnerabilities/releases"
)
VULN_PATTERN = r"\.\/vulns\/({})[\-A-Z\-0-9]*\.json"
class VulnDB:
latest_version: Optional[str] = None
latest_db_path: Optional[str] = None
_user_app_dir: str = AppDirs("Ochrona", "Ochrona").user_data_dir
def __init__(self, logger: OchronaLogger):
self._logger = logger
self._create_user_app_dir()
self._check_local_db_present()
if self.latest_version is not None:
if self._is_update_available():
self._logger.debug(f"More recent version of DB found, will update")
self._update_db()
else:
self._download_latest_db()
def _is_update_available(self):
try:
releases = requests.get(RELEASES_URL).json()
top_version = max([r.get("name") for r in releases])
return top_version > self.latest_version
except Exception as ex:
self._logger.error(f"Error fetching new releases: {ex}")
return False
def _check_local_db_present(self):
files = glob.glob(f"{self.user_app_dir}/*tar.gz")
if len(files) > 0:
version = pathlib.Path(files[0]).name.replace(".tar.gz", "")
self.latest_version = version
self.latest_db_path = f"{self.user_app_dir}/{self.latest_version}.tar.gz"
self._logger.debug(f"DB instance found: {version}")
def _download_latest_db(self):
releases = requests.get(RELEASES_URL).json()
sorted_releases = sorted(releases, key=lambda r: r["name"], reverse=True)
r = requests.get(
sorted_releases[0].get("assets")[0].get("browser_download_url")
)
with open(
f"{self.user_app_dir}/{sorted_releases[0].get('assets')[0].get('name')}",
"wb",
) as f:
f.write(r.content)
self.latest_version = (
sorted_releases[0].get("assets")[0].get("name").replace(".tar.gz", "")
)
self.latest_db_path = f"{self.user_app_dir}/{self.latest_version}.tar.gz"
self._logger.debug(f"DB upgraded to {self.latest_version}")
def _create_user_app_dir(self):
os.makedirs(self.user_app_dir, exist_ok=True)
def _update_db(self):
self._delete_old_dbs()
self._download_latest_db()
def _delete_old_dbs(self):
files = glob.glob(f"{self.user_app_dir}/*tar.gz")
for file_ in files:
os.remove(file_)
def lookup_by_name(self, name: str) -> List[Dict[str, Any]]:
potential_vuln_paths = []
potential_vulns = []
with TarSafe.open(self.latest_db_path, "r:gz") as tar:
vulns = tar.getmembers()
for vuln in vulns:
if re.match(VULN_PATTERN.format(name), vuln.name):
potential_vuln_paths.append(vuln.name)
for vuln_path in potential_vuln_paths:
potential_vulns.append(json.loads(tar.extractfile(vuln_path).read()))
self._logger.debug(
f"Found {len(potential_vulns)} vulnerabilities potentially affecting package: {name}"
)
return potential_vulns
@property
def user_app_dir(self):
return self._user_app_dir
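# Illustrative usage sketch (assumption: OchronaLogger can be constructed with
# its defaults; the package name is arbitrary). Instantiating VulnDB downloads
# or refreshes the local vulnerability archive, and lookup_by_name returns the
# raw JSON records whose file names start with the given package name.
# >>> db = VulnDB(OchronaLogger())
# >>> vulns = db.lookup_by_name("requests")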
|
461331
|
from vocabulary import Vocab
import csv
tmp_Vocab = Vocab()
tmp_Vocab.count_file("../data/test/train.txt", add_eos=False)
tmp_Vocab.build_vocab()
with open('../data/test/label.tsv', 'wt') as f:
tsv_writer = csv.writer(f, delimiter='\t')
tsv_writer.writerow(['label', 'index'])
for i in range(len(tmp_Vocab.idx2sym)):
tsv_writer.writerow([tmp_Vocab.idx2sym[i], i])
# tsv_writer.writerow([tmp_Vocab.idx2sym[i]])
|
461344
|
import pytest
from testutils import get_co, get_bytecode
from equip import BytecodeObject, BlockVisitor
from equip.bytecode import MethodDeclaration, TypeDeclaration, ModuleDeclaration
from equip.bytecode.utils import show_bytecode
import equip.utils.log as logutils
from equip.utils.log import logger
logutils.enableLogger(to_file='./equip.log')
from equip.analysis import ControlFlow, BasicBlock
SIMPLE_PROGRAM = """
import random
import sys
a = lambda x, y: x + (y if foo == 'bar' else x)
def some_value(i):
if (i % 2) == 0:
print "even",
elif foobar:
print "whatever"
else:
print "odd",
for n in range(2, 10):
for x in range(2, n):
if n % x == 0:
print n, 'equals', x, '*', n/x
break
print "foobar"
else:
# loop fell through without finding a factor
print n, 'is a prime number'
print "number: %d" % i
return i - 1
def ask_ok(prompt, retries=4, complaint='Yes or no, please!'):
while True:
ok = raw_input(prompt)
if ok in ('y', 'ye', 'yes'):
return True
if ok in ('n', 'no', 'nop', 'nope'):
return False
print False
retries = retries - 1
if retries < 0:
raise IOError('refusenik user')
print "Never reached"
print complaint
if foobar:
print "whatever"
def with_stmt(something):
with open('output.txt', 'w') as f:
f.write('Hi there!')
def exception_tests():
try:
fd = open('something')
except SomeException, ex:
print "SomeException"
except Exception, ex:
print "Last Exception"
finally:
print "Finally"
def while_loop(data, start):
while start < len(data):
print start
start += 1
if start > 10:
return -1
def main():
for i in range(1, random.randint()):
print some_value(i)
print "Call stats:"
items = sys.callstats().items()
items = [(value, key) for key, value in items]
items.sort()
items.reverse()
for value,key in items:
print "%30s: %30s"%(key, value)
def return_Stmts(i):
if i == 1:
return 1
elif i == 2:
return 2
print "This is something else"
if __name__ == '__main__':
main()
"""
def test_block_visitor():
co_simple = get_co(SIMPLE_PROGRAM)
assert co_simple is not None
bytecode_object = BytecodeObject('<string>')
bytecode_object.parse_code(co_simple)
class BlockPrinterVisitor(BlockVisitor):
def __init__(self):
BlockVisitor.__init__(self)
def new_control_flow(self):
logger.debug("Received new CFG: %s", self.control_flow)
def visit(self, block):
logger.debug("Visiting block: %s", block)
logger.debug('\n' + show_bytecode(block.bytecode))
visitor = BlockPrinterVisitor()
bytecode_object.accept(visitor)
|
461358
|
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
__author__ = "<NAME>, <NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2019-2020, %s." % __author__
__homepage__ = "https://github.com/PhoenixDL/rising"
# this has to be simple string, see: https://github.com/pypa/twine/issues/522
__docs__ = (
"rising is a highly performant, PyTorch only framework for "
"efficient data augmentation with support for volumetric data"
)
__long_docs__ = ""
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of skimage when
# the binaries are not built
__RISING_SETUP__
except NameError:
__RISING_SETUP__ = False
if __RISING_SETUP__:
import sys # pragma: no-cover
sys.stdout.write(f"Partial import of `{__name__}` during the build process.\n") # pragma: no-cover
# We are not importing the rest of the lightning during the build process, as it may not be compiled yet
else:
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("once")
from rising.interface import AbstractMixin
|
461360
|
from fac.commands import Command, Arg
from fac.utils import prompt
class RemoveCommand(Command):
"""Remove mods."""
name = 'remove'
arguments = [
Arg('mods', help="mod patterns to remove ('*' for all)", nargs='+'),
Arg('-y', '--yes', action='store_true',
help="automatic yes to confirmation prompt"),
Arg('-U', '--unpacked', action='store_false', dest='packed',
default=None, help="only remove unpacked mods"),
Arg('-P', '--packed', action='store_true', dest='packed',
default=None, help="only remove packed mods",),
]
def run(self, args):
mods = []
for mod_pattern in args.mods:
mod_pattern = self.manager.resolve_mod_name(mod_pattern)
matches = self.manager.find_mods(mod_pattern,
packed=args.packed)
mods.extend(matches)
if not matches:
print("No match found for %s." % mod_pattern)
if mods:
print("The following mods will be removed:")
for mod in mods:
print(" %s" % mod.location)
if not args.yes and prompt("Continue?", "Y/n") != "y":
return
for mod in mods:
mod.remove()
|
461370
|
from create_dataset import create_MIMIC_dataset, create_eICU_dataset
from dataframe_gen import preprocess
from numpy_convert import convert2numpy
from preprocess_utils import label_npy_file
import os
import argparse
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_input_path', type=str)
parser.add_argument('--data_output_path', type=str)
parser.add_argument('--max_length', type=int, default=150)
parser.add_argument('--min_length', type=int, default=5)
parser.add_argument('--window_time', type=int, default=12)
parser.add_argument('--data_type', type=str, choices=['MICU', 'TotalICU'], default='MICU')
return parser
def main():
args = get_parser().parse_args()
# file names
mimic_csv_files = {'lab':'LABEVENTS',
'med':'PRESCRIPTIONS',
'inf': 'INPUTEVENTS'}
eicu_csv_files = {'lab':'lab',
'med':'medication',
'inf':'infusionDrug'}
# definition file name
mimic_def_file = {'LABEVENTS':'D_LABITEMS',
'INPUTEVENTS_CV':'D_ITEMS',
'INPUTEVENTS_MV':'D_ITEMS'}
# columns_map
mimic_columns_map = {'LABEVENTS':
{'HADM_ID':'ID',
'CHARTTIME':'code_time',
'ITEMID':'code_name',
'VALUENUM':'value',
'VALUEUOM':'uom',
'FLAG':'issue'
},
'PRESCRIPTIONS':
{'HADM_ID':'ID',
'STARTDATE':'code_time',
'DRUG':'code_name',
'ROUTE':'route',
'PROD_STRENGTH':'prod',
'DOSE_VAL_RX':'value',
'DOSE_UNIT_RX':'uom',
},
'INPUTEVENTS':
{'HADM_ID':'ID',
'CHARTTIME':'code_time',
'ITEMID':'code_name',
'RATE':'value',
'RATEUOM':'uom',
'STOPPED':'issue'
}
}
eicu_columns_map = {'lab':
{'patientunitstayid':'ID',
'labresultoffset':'code_offset',
'labname':'code_name',
'labresult':'value',
'labmeasurenamesystem':'uom'
},
'medication':
{'patientunitstayid':'ID',
'drugstartoffset':'code_offset',
'drugname':'code_name',
'routeadmin':'route',
'ordercancelled':'issue'
},
'infusionDrug':
{'patientunitstayid':'ID',
'infusionoffset':'code_offset',
'drugname':'code_name',
'infusionrate':'value'
}
}
issue_map = {'LABEVENTS':
['abnormal'],
'INPUTEVENTS':
['Restart',
'NotStopd',
'Rewritten',
'Changed',
'Paused',
'Flushed',
'Stopped'
] ,
'medication':
['Yes'],
}
csv_files_dict = {'mimic':mimic_csv_files,
'eicu':eicu_csv_files
}
columns_map_dict = {'mimic':mimic_columns_map,
'eicu':eicu_columns_map
}
item_list = ['lab','med', 'inf']
wd = os.getcwd()
print('working directory .. : ', wd)
#create_MIMIC_dataset(os.path.join(args.data_input_path, 'mimic'))
#create_eICU_dataset(os.path.join(args.data_input_path, 'eicu'))
preprocess(args.data_input_path,
item_list,
csv_files_dict,
columns_map_dict,
issue_map,
mimic_def_file,
args.max_length,
args.data_type)
#convert2numpy(args.data_input_path, args.data_output_path)
#label_npy_file(args.data_input_path, args.data_output_path)
print('preprocess finish!!')
if __name__ == '__main__':
main()
|
461389
|
import pytest
from rest_framework import status
from rgd_imagery import models
@pytest.mark.django_db(transaction=True)
def test_download_image_file(admin_api_client, astro_image):
pk = astro_image.pk
response = admin_api_client.get(f'/api/rgd_imagery/{pk}/data')
assert status.is_redirect(response.status_code)
@pytest.mark.xfail
@pytest.mark.django_db(transaction=True)
def test_create_get_subsampled_image(admin_api_client, astro_image):
payload = {
'process_type': 'region',
'parameters': {
'sample_type': 'pixel box',
'right': 100,
'left': 0,
'top': 200,
'bottom': 0,
},
}
response = admin_api_client.post('/api/image_process/group', payload, format='json')
assert response.status_code == 201
assert response.data
group_id = response.data['id']
payload = {
'source_images': [
astro_image.pk,
],
'group': group_id,
}
response = admin_api_client.post('/api/image_process', payload, format='json')
assert response.status_code == 201
assert response.data
id = response.data['id']
sub = models.ProcessedImage.objects.get(pk=id)
assert sub.processed_image
# Test the GET
response = admin_api_client.get(f'/api/image_process/{id}')
assert response.status_code == 200
assert response.data
# Now test to make sure the serializer prevents duplicates
response = admin_api_client.post('/api/image_process', payload, format='json')
assert response.status_code == 201
assert response.data
assert id == response.data['id'] # Compare against original PK
@pytest.mark.xfail
@pytest.mark.django_db(transaction=True)
def test_create_and_download_cog(admin_api_client, geotiff_image_entry):
response = admin_api_client.post(
'/api/image_process/group',
{
'process_type': 'cog',
},
format='json',
)
assert response.status_code == 201
assert response.data
group_id = response.data['id']
response = admin_api_client.post(
'/api/image_process',
{
'source_images': [
geotiff_image_entry.id,
],
'group': group_id,
},
format='json',
)
assert response.status_code == 201
assert response.data
# Check that a COG was generated
cog = models.ProcessedImage.objects.get(pk=response.data['id'])
# NOTE: This doesn't actually verify the file is in COG format. Assumed.
assert cog.processed_image
# Also test download endpoint here:
pk = cog.pk
response = admin_api_client.get(f'/api/image_process/{pk}')
assert response.data
|
461408
|
from .models import Organization
from .models import Administrator
from .models import Recipient
from .models import Task
from .models import Address
from .models import Destination
from .models import Vehicle
from .models import Worker
from .onfleet import Onfleet
from .exceptions import OnfleetDuplicateKeyException
from .metadata import (
__author__,
__copyright__,
__email__,
__license__,
__maintainer__,
__version__,
)
__all__ = [
'__author__',
'__copyright__',
'__email__',
'__license__',
'__maintainer__',
'__version__',
'OnfleetDuplicateKeyException',
'Organization',
'Administrator',
'Recipient',
'Task',
'Address',
'Destination',
'Vehicle',
'Worker',
'Onfleet',
]
|
461418
|
import configparser
def read_config(config_text, schema=None):
"""Read options from ``config_text`` applying given ``schema``"""
schema = schema or {}
cfg = configparser.ConfigParser(
interpolation=configparser.ExtendedInterpolation()
)
try:
cfg.read_string(config_text)
except configparser.MissingSectionHeaderError:
config_text = '[main]\n' + config_text
cfg.read_string(config_text)
config = {}
for section in schema:
options = config.setdefault(section, {})
for option, option_schema in schema[section].items():
options[option] = option_schema.get('default')
for section in cfg.sections():
options = config.setdefault(section, {})
section_schema = schema.get(section, {})
for option in cfg.options(section):
option_schema = section_schema.get(option, {})
getter = 'get' + option_schema.get('type', '')
options[option] = getattr(cfg, getter)(section, option)
return config
config_text = '''
debug = true
[registry]
name = Alessandro
surname = Molina
[extra]
likes = spicy food
countrycode = 39
'''
config = read_config(config_text, {
'main': {
'debug': {'type': 'boolean'}
},
'registry': {
'name': {'default': 'unknown'},
'surname': {'default': 'unknown'},
'middlename': {'default': ''},
},
'extra': {
'countrycode': {'type': 'int'},
'age': {'type': 'int', 'default': 0}
},
'more': {
'verbose': {'type': 'int', 'default': 0}
}
})
import pprint
pprint.pprint(config)
|
461449
|
import pytest
from mktestdocs import check_codeblock, grab_code_blocks
exibit_a = """
This is an example docstring.
Arguments:
a: a parameter
There is no example
"""
exibit_b = """
This is an example docstring.
Arguments:
a: a parameter
```python
assert 1 == 1
```
"""
exibit_c = """
This is an example docstring.
Arguments:
a: a parameter
```
assert 1 == 1
```
```python
assert 1 == 1
```
"""
@pytest.mark.parametrize(
"doc, n",
[(exibit_a, 0), (exibit_b, 1), (exibit_c, 1)],
ids=["exibit_a", "exibit_b", "exibit_c"],
)
def test_number_of_codeblocks(doc, n):
assert len(grab_code_blocks(doc, lang="python")) == n
@pytest.mark.parametrize(
"doc, n",
[(exibit_a, 0), (exibit_b, 1), (exibit_c, 2)],
ids=["exibit_a", "exibit_b", "exibit_c"],
)
def test_number_of_codeblocks_any(doc, n):
assert len(grab_code_blocks(doc, lang=None)) == n
|
461466
|
from bs4 import BeautifulSoup
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
import re
import warnings
from nltk import stem
link_re = re.compile(r'\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*')
letter_re = re.compile(r"[^a-zA-Z]")
def normalize_and_remove_stop_words(raw_text, stem=False, **kwargs):
'''
Algorithm to convert raw text to a return a clean text string
Method modified from code available at:
https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-1-for-beginners-bag-of-words
Args:
raw_text: Original text to clean and normalize
Returns:
clean_text: Cleaned text, converted to lower case, with punctuation removed
and one space between each word.
'''
# 1. Remove web links
links_removed = remove_links(raw_text)
#
# 2. Remove HTML
#TODO Potentially look into using package other than BeautifulSoup for this step
# Suppress UserWarnings from BeautifulSoup due to text with tech info (ex: code, directory structure)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
review_text = BeautifulSoup(links_removed, "lxml").get_text()
#
# 3. Remove non-letters
letters_only = letter_re.sub(" ", review_text)
#
# 4. Convert to lower case, split into individual words
words = letters_only.lower().split()
#
# 5. Remove stop words
meaningful_words = remove_stop_words(words)
#6. stem if necessary
if stem: meaningful_words = porter_stem(meaningful_words)
#
# 7. Join the words back into one string separated by space,
# and return the result.
clean_text = ( " ".join( meaningful_words ))
return clean_text
def porter_stem(words):
stemmer = stem.PorterStemmer()
return [stemmer.stem(w) for w in words]
def xml_normalize(raw_text, stem=False, **kwargs ):
"""Alternative normalization: HTML/XML and URLs stripped out, lower-cased,
but stop words and punctuation remain."""
# 1. Remove web links
links_removed = remove_links(raw_text)
# 2. Remove HTML
#TODO Potentially look into using package other than BeautifulSoup for this step
# Suppress UserWarnings from BeautifulSoup due to text with tech info (ex: code, directory structure)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=UserWarning)
review_text = BeautifulSoup(links_removed, "lxml").get_text()
# 3. Convert to lower-case
lower_case = review_text.lower().split()
    # 4. Stem if necessary
if stem: lower_case = porter_stem(lower_case)
#
    # 5. Join the words back into one string separated by space,
# and return the result.
clean_text = ( " ".join( lower_case ))
return clean_text
def remove_links(text):
links_removed = link_re.sub('', text)
return links_removed
def remove_stop_words(words, stop_words=ENGLISH_STOP_WORDS):
"""Remove stop words from input"""
return [w for w in words if not w in stop_words]
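# Illustrative usage sketch (hypothetical input string): both helpers strip URLs
# and HTML, lower-case the text, and optionally Porter-stem it; only the first
# one also removes non-letters and English stop words.
# >>> normalize_and_remove_stop_words("Check <b>THIS</b> out: http://example.com", stem=True)
# >>> xml_normalize("Check <b>THIS</b> out: http://example.com")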
|
461483
|
import sys
sys.path.append("../")
import json
import arbitrage
import time
from arbitrage.observers import observer
class TestObserver(observer.Observer):
def opportunity(
self,
profit,
volume,
buyprice,
kask,
sellprice,
kbid,
perc,
weighted_buyprice,
weighted_sellprice,
):
print("Time: %.3f" % profit)
def main():
arbitrer = arbitrage.Arbitrer()
depths = arbitrer.depths = json.load(open("speed-test.json"))
start_time = time.time()
testobs = TestObserver()
arbitrer.observers = [testobs]
arbitrer.arbitrage_opportunity(
"BitstampUSD", depths["BitstampUSD"]["asks"][0], "KrakenEUR", depths["KrakenEUR"]["asks"][0]
)
# FIXME: add asserts
elapsed = time.time() - start_time
print("Time: %.3f" % elapsed)
if __name__ == "__main__":
main()
|
461488
|
import lark
class NType:
def __init__(self, name):
self.name = name
def __hash__(self):
return hash(id(self))
def __eq__(self, other):
return self is other
class NGenericType(NType):
def __init__(self, name):
super(NGenericType, self).__init__(name)
def __repr__(self):
return "NGenericType(%s)" % repr(self.name)
class NAliasType(NType):
def __init__(self, name, alias_type, typevars=None):
super(NAliasType, self).__init__(name)
self.typevars = typevars or []
self.type = alias_type
def with_typevars(self, typevar_defs=None):
if typevar_defs is None:
typevar_defs = []
if len(self.typevars) != len(typevar_defs):
raise TypeError(
"Expected %d typevars, not %d."
% (len(self.typevars), len(typevar_defs))
)
return apply_generics_to(
self.type,
{
typevar: typevar_def
for typevar, typevar_def in zip(self.typevars, typevar_defs)
},
)
class NTypeVars(NType):
def __init__(self, name, typevars=None, original=None):
super(NTypeVars, self).__init__(name)
self.typevars = typevars or []
# Keep a reference to the original NTypeVars so that types can be
# compared by reference
self.base_type = original or self
def with_typevars(self, typevars):
if len(self.typevars) != len(typevars):
raise TypeError(
"Expected %d typevars, not %d." % (len(self.typevars), len(typevars))
)
return self.new_child(typevars)
def new_child(self, typevars):
return type(self)(self.name, typevars, original=self.base_type)
def is_type(self, other):
return isinstance(other, NTypeVars) and self.base_type is other.base_type
def __hash__(self):
if self.base_type is self:
return hash(id(self))
else:
return hash(self.base_type)
def __eq__(self, other):
return (
isinstance(other, NTypeVars)
and self.base_type is other.base_type
and self.typevars == other.typevars
)
def __repr__(self):
return "NTypeVars(%s, %s)" % (repr(self.name), repr(self.typevars))
# N modules are kind of like records but different
class NModule(dict):
def __init__(self, name, *args, types=None, **kw):
super(NModule, self).__init__(*args, **kw)
self.mod_name = name
self.types = types if types is not None else {}
# Prevent destructuring modules completely. This hidden internal field
# should never be shown to the casual N programmer.
self["not exhaustive"] = True
# Module unit test wrapper
class NModuleWrapper:
def __init__(self, mod):
self.mod = mod
# Classes are records but should have custom name displayed
class NClass(dict):
def __init__(self, name, *args, **kw):
super(NClass, self).__init__(*args, **kw)
self.class_name = name
"""
`expected` is the type of the function's argument, the type with the
generics/type variables.
`actual` is the type of the expression being passed in, the type with the
generics known.
Pass in `generics` to keep track of the generics across different matches.
Returns a type with the generics swapped out to best fit the actual type. For
example, `apply_generics(list[t], list[str])` (psuedocode) will return
`list[str]`. This can then be compared with actual separately.
"""
def apply_generics(expected, actual, generics=None):
if generics is None:
generics = {}
if isinstance(expected, NGenericType):
generic = generics.get(expected)
if generic is None:
generics[expected] = "none" if actual is None else actual
return actual
elif generic == "none":
generic = None
if isinstance(generic, NGenericType) and not isinstance(actual, NGenericType):
generics[expected] = actual
return actual
else:
return generic
elif isinstance(expected, NTypeVars) and isinstance(actual, NTypeVars):
if expected.base_type is actual.base_type:
return expected.with_typevars(
[
apply_generics(expected_type, actual_type, generics)
for expected_type, actual_type in zip(
expected.typevars, actual.typevars
)
]
)
elif isinstance(expected, tuple) and isinstance(actual, tuple):
return tuple(
apply_generics(expected_arg, actual_arg, generics)
for expected_arg, actual_arg in zip(expected, actual)
)
elif isinstance(expected, list) and isinstance(actual, list):
return [
apply_generics(expected_item, actual_item, generics)
for expected_item, actual_item in zip(expected, actual)
]
elif (
isinstance(expected, dict)
and not isinstance(expected, NModule)
and isinstance(actual, dict)
and not isinstance(actual, NModule)
):
return {
key: apply_generics(expected_type, actual[key], generics)
if key in actual
else expected_type
for key, expected_type in expected.items()
}
return expected
"""
Given generic mappings (eg `t` -> `str`) from `apply_generics`, it'll transform
the given type by replacing the generic types according to the mapping. For
example, `apply_generics_to(list[t], { t: str })` (psuedocode) will return
`list[str]`.
Note that this currently probably should fail for functions that only use a
generic in its return type, but I'm not sure how that would work.
"""
def apply_generics_to(return_type, generics):
if isinstance(return_type, NGenericType):
generic = generics.get(return_type)
if generic is None:
return return_type
else:
return generic
if isinstance(return_type, NTypeVars):
return return_type.with_typevars(
[apply_generics_to(typevar, generics) for typevar in return_type.typevars]
)
elif isinstance(return_type, tuple):
return tuple(apply_generics_to(arg_type, generics) for arg_type in return_type)
elif isinstance(return_type, list):
return [apply_generics_to(item_type, generics) for item_type in return_type]
elif isinstance(return_type, dict) and not isinstance(return_type, NModule):
return {
key: apply_generics_to(field_type, generics)
for key, field_type in return_type.items()
}
else:
return return_type
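# Illustrative sketch (assumption: a "list"-like NTypeVars with one generic
# type variable, standing in for N's builtin list type).
# >>> t = NGenericType("t")
# >>> n_list = NTypeVars("list", [t])
# >>> str_type = NType("str")
# >>> mapping = {}
# >>> apply_generics(n_list, n_list.with_typevars([str_type]), mapping)  # list[str]
# >>> apply_generics_to(n_list, mapping)  # also list[str], via the recorded mapping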
"""
Given two types that should be equal, this function will try to match them and
resolve generics, then return a pair containing the resolved type and whether
there is a problem.
Examples:
- resolve_equal_types((str, int, bool), (str, int, bool)) -> (str, int, bool), False
- resolve_equal_types(list[t], list[str]) -> list[str], False
- resolve_equal_types(list[t], str) -> None, True
- resolve_equal_types(list[t], list[b]) -> list[b], False if t is from the base type
- resolve_equal_types(list[t], list[t]) -> list[t], False
- resolve_equal_types(list[a], list[b]) -> None, True if neither a nor b are from the base type
If a type is None (error), the function will return None, False to avoid
compounding errors.
- resolve_equal_types(None, int) -> None, False
"""
def resolve_equal_special_types(type_a, type_b):
if isinstance(type_a, NTypeVars):
if (
not isinstance(type_b, NTypeVars)
or type_a.base_type is not type_b.base_type
):
return None, True
base_type = type_a.base_type
resolved_typevars = []
for typevar_a, typevar_b in zip(type_a.typevars, type_b.typevars):
if isinstance(typevar_a, NGenericType) and isinstance(typevar_b, NGenericType):
resolved_typevars.append(typevar_a)
elif isinstance(typevar_a, NGenericType):
resolved_typevars.append(typevar_b)
elif (
isinstance(typevar_b, NGenericType) and typevar_b in base_type.typevars
):
resolved_typevars.append(typevar_a)
else:
resolved, problem = resolve_equal_types(typevar_a, typevar_b)
if problem:
return None, True
resolved_typevars.append(resolved)
return type_a.with_typevars(resolved_typevars), False
    elif isinstance(type_a, (list, tuple)):
        if not isinstance(type_b, (list, tuple)) or len(type_a) != len(type_b):
            return None, True
resolved_types = []
for item_a, item_b in zip(type_a, type_b):
resolved, problem = resolve_equal_types(item_a, item_b)
if problem:
return None, True
resolved_types.append(resolved)
if isinstance(type_b, tuple):
resolved_types = tuple(resolved_types)
return resolved_types, False
elif isinstance(type_a, dict) and not isinstance(type_a, NModule):
if (
not isinstance(type_b, dict)
or isinstance(type_b, NModule)
or type_a.keys() != type_b.keys()
):
return None, True
resolved_types = {}
for key in type_a.keys():
resolved, problem = resolve_equal_types(type_a[key], type_b[key])
if problem:
return None, True
resolved_types[key] = resolved
return resolved_types, False
    return None  # not a special (typevar, sequence, or record) type; let resolve_equal_types fall back to direct comparison
def resolve_equal_types(type_a, type_b):
type_a_special_resolved = resolve_equal_special_types(type_a, type_b)
type_b_special_resolved = resolve_equal_special_types(type_b, type_a)
if type_a is None or type_b is None:
return None, False
elif type_a_special_resolved:
return type_a_special_resolved
elif type_b_special_resolved:
return type_b_special_resolved
elif type_a == type_b:
return type_a, False
else:
return None, True
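# Hedged illustration (assumption: primitive types are plain strings here, so
# only the container branches above are exercised):
#   resolve_equal_types(("str", "int"), ("str", "int"))  # -> (("str", "int"), False)
#   resolve_equal_types(["str"], ["int"])                # -> (None, True)
#   resolve_equal_types(None, "int")                     # -> (None, False)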
|
461512
|
import pandas as pd
import numpy as np
from Event import Event
from Team import Team
from Constant import Constant
class Game:
"""A class for keeping info about the games"""
def __init__(self, path_to_json, event_index=0):
# self.events = None
self.home_team = None
self.guest_team = None
self.event = None
self.path_to_json = path_to_json
self.event_index = event_index
def read_json(self):
data_frame = pd.read_json(self.path_to_json)
last_default_index = len(data_frame) - 1
self.event_index = min(self.event_index, last_default_index)
index = self.event_index
print(Constant.MESSAGE + str(last_default_index))
event = data_frame['events'][index]
self.event = Event(event)
self.home_team = Team(event['home']['teamid'])
self.guest_team = Team(event['visitor']['teamid'])
def start(self):
self.event.show()
def get_feature_dict(self):
return self.event.get_features_dict()
def _convert_feature_dict_pos_matrix(self, feature_dict, node_names=None):
if node_names is None:
node_names = feature_dict.keys()
        N, T = len(node_names), next(iter(feature_dict.values())).shape[0]
pos_matrix = np.zeros((T, N, 2))
for i, node_name in enumerate(node_names):
pos_matrix[:, i, :] = feature_dict[node_name][:, :2]
return pos_matrix
def get_pos_matrix(self, return_feature_dict=False):
feature_dict = self.get_feature_dict()
if return_feature_dict:
return self._convert_feature_dict_pos_matrix(feature_dict), feature_dict
else:
return self._convert_feature_dict_pos_matrix(feature_dict)
def get_st_graph_info(self, robot_node_name):
pos_matrix, feature_dict = self.get_pos_matrix(return_feature_dict=True)
node_names = feature_dict.keys()
type_list = [self.event.player_types[node_name] for node_name in node_names]
robot_node_type = self.event.player_types[robot_node_name]
return pos_matrix[0], type_list, node_names, robot_node_name, robot_node_type
|
461513
|
import numpy as np
from SafePDP import SafePDP
from SafePDP import PDP
from JinEnv import JinEnv
from casadi import *
import scipy.io as sio
import matplotlib.pyplot as plt
from colour import Color
import time
import random
from matplotlib import cm
from ControlTools import ControlTools
# --------------------------- load environment ----------------------------------------
env = JinEnv.CartPole()
mc, mp, l = 0.5, 0.5, 1
env.initDyn(mc=mc, mp=mp, l=l)
wx, wq, wdx, wdq, wu = 0.1, 1, 0.1, 0.1, 0.1
env.initCost(wx=wx, wq=wq, wdx=wdx, wdq=wdq, wu=wu)
max_x = 1
max_u = 4
env.initConstraints(max_u=4, max_x=max_x)
dt = 0.12
horizon = 25
init_state = [0, 0, 0, 0]
dyn = env.X + dt * env.f
time_grid = np.arange(0, horizon+1)
# --------------------------- basic plot setting ----------------------------------------
params = {'axes.labelsize': 25,
'axes.titlesize': 25,
'xtick.labelsize': 20,
'ytick.labelsize': 20,
'legend.fontsize': 16}
plt.rcParams.update(params)
# ----------- Plot the comparison between the Safe PDP and ALTRO results -------------
if True:
# load safe motion planning results
load = np.load('./SPlan_Cartpole_trial_2.npy', allow_pickle=True).item()
safe_loss_trace = load['loss_trace']
safe_parameter_trace = load['parameter_trace']
safe_gamma = load['gamma']
safe_max_iter = safe_parameter_trace.shape[0]
safe_init_parameter = load['init_parameter']
# create safe policy optimization object
safe_planner = SafePDP.CSysOPT()
safe_planner.setStateVariable(env.X)
safe_planner.setControlVariable(env.U)
safe_planner.setDyn(dyn)
safe_planner.setPathCost(env.path_cost)
safe_planner.setFinalCost(env.final_cost)
safe_planner.setPathInequCstr(env.path_inequ)
safe_planner.convert2BarrierOC(gamma=safe_gamma)
# set the poly policy
safe_planner.setPolyTraj(horizon=horizon, n_poly=load['n_poly'])
# load altro motion planning results
altro_load = np.load('./ALTRO_Cartpole_trial_1.npy', allow_pickle=True).item()
altro_loss_trace = altro_load['loss_trace']
altro_control_traj_trace = altro_load['control_traj_trace']
altro_max_outer = altro_load['max_outer']
altro_max_inner=altro_load['max_inner']
# create PDP policy optimization object
altro = ControlTools.ALTRO()
altro.setStateVariable(env.X)
altro.setControlVariable(env.U)
altro.setDyn(dyn)
altro.setPathCost(env.path_cost)
altro.setFinalCost(env.final_cost)
altro.setPathConstraint(env.path_inequ)
altro.diffSys()
# --------------------------- plot comparison result ----------------------------------------
fig = plt.figure(0, figsize=(8.5, 5.0))
ax = fig.subplots(2, 2)
# plot the safe PDP results
iter_index_vec = [k for k in range(0, 1000, 30)]
iter_index_vec += [k for k in range(1000,3000, 1000)]
colors = list(Color("lightblue").range_to(Color("royalblue"), len(iter_index_vec)))
colors2=list(Color("navajowhite").range_to(Color("darkorange"), len(iter_index_vec)))
for i, iter_k in enumerate(iter_index_vec):
# safe policy
safe_state_traj, safe_control_traj, _, _, = safe_planner.integrateSys(init_state=init_state, horizon=horizon,
control_auxvar_value=safe_parameter_trace[
iter_k, :])
ax[0,0].plot(time_grid[0:-1], safe_control_traj, color=colors[i].hex, )
ax[1,0].plot(time_grid, safe_state_traj[:,0], color=colors2[i].hex, )
# legend
safe_state_trajs, safe_control_trajs, _, _, = safe_planner.integrateSys(init_state=init_state, horizon=horizon,
control_auxvar_value=safe_parameter_trace[
0, :])
safe_state_trajf, safe_control_trajf, _, _, = safe_planner.integrateSys(init_state=init_state, horizon=horizon,
control_auxvar_value=safe_parameter_trace[
-1, :])
line_safe_control_s, = ax[0,0].plot(time_grid[0:-1], safe_control_trajs, color=colors[0].hex, zorder=-100, linewidth=3)
line_safe_control_f, = ax[0,0].plot(time_grid[0:-1], safe_control_trajf, color=colors[-1].hex, zorder=100, linewidth=3)
line_safe_state_s, = ax[1,0].plot(time_grid, safe_state_trajs[:,0], color=colors2[0].hex, zorder=-100, linewidth=3)
line_safe_state_f, = ax[1,0].plot(time_grid, safe_state_trajf[:,0], color=colors2[-1].hex, zorder=100, linewidth=3)
ax[0,0].legend([line_safe_control_s, line_safe_control_f],
['Iter. #0', 'Iter. #3000', ], ncol=2, prop={'size': 15},
columnspacing=0.5, handlelength=1).set_zorder(-102)
ax[1, 0].legend([line_safe_state_s, line_safe_state_f],
['Iter. #0', 'Iter. #3000', ], ncol=2, prop={'size': 15},
columnspacing=0.5, handlelength=1).set_zorder(-102)
# plot the ALTRO results
iter_index_vec = [k for k in range(0, 300, 8)]
iter_index_vec+=[k for k in range(300, 3000, 1000)]
colors = list(Color("lightblue").range_to(Color("royalblue"), len(iter_index_vec)))
colors2=list(Color("navajowhite").range_to(Color("darkorange"), len(iter_index_vec)))
for i, iter_k in enumerate(iter_index_vec):
altro_control_traj=altro_control_traj_trace[iter_k]
sol = altro.integrateSys(init_state, altro_control_traj)
altro_state_traj=sol['state_traj']
ax[0, 1].plot(time_grid[0:-1], altro_control_traj, color=colors[i].hex, )
ax[1, 1].plot(time_grid, altro_state_traj[:, 0], color=colors2[i].hex)
# legend
altro_sols = altro.integrateSys(init_state, altro_control_traj_trace[0])
altro_solf = altro.integrateSys(init_state, altro_control_traj_trace[-1])
line_altro_control_s, = ax[0,1].plot(time_grid[0:-1], altro_control_traj_trace[0], color=colors[0].hex, zorder=-100, linewidth=3)
line_altro_control_f, = ax[0,1].plot(time_grid[0:-1], altro_control_traj_trace[-1], color=colors[-1].hex, zorder=100, linewidth=3)
line_altro_state_s, = ax[1,1].plot(time_grid, altro_sols['state_traj'][:,0], color=colors2[0].hex, zorder=-100, linewidth=3)
line_altro_state_f, = ax[1,1].plot(time_grid, altro_solf['state_traj'][:,0], color=colors2[-1].hex, zorder=100, linewidth=3)
ax[0,1].legend([line_altro_control_s, line_altro_control_f],
['Iter. #0', 'Iter. #3000', ], ncol=2, prop={'size': 15},
columnspacing=0.5, handlelength=1).set_zorder(-102)
ax[1, 1].legend([line_altro_state_s, line_altro_state_f],
['Iter. #0', 'Iter. #3000', ], ncol=2, prop={'size': 15},
columnspacing=0.5, handlelength=1).set_zorder(-102)
ax[0,0].plot(time_grid, max_u * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[0,0].plot(time_grid, -max_u * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[0,1].plot(time_grid, max_u * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[0,1].plot(time_grid, -max_u * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[0,0].fill_between(time_grid, max_u, -max_u, color='#EFEFEF', alpha=1)
ax[0,1].fill_between(time_grid, max_u, -max_u, color='#EFEFEF', alpha=1)
ax[1,0].fill_between(time_grid, max_x, -max_x, color='#EFEFEF', alpha=1)
ax[1,1].fill_between(time_grid, max_x, -max_x, color='#EFEFEF', alpha=1)
ax[1,0].plot(time_grid, max_x * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[1,0].plot(time_grid, -max_x * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[1,1].plot(time_grid, max_x * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[1,1].plot(time_grid, -max_x * np.ones_like(time_grid), '--', linewidth=4, color='black')
ax[0,0].set_ylabel('Control', labelpad=0)
ax[1,0].set_ylabel('Cart pos.', labelpad=0)
# ax[0].set_xlabel('Time')
# ax[0].tick_params(axis='x', which='major', pad=10)
# ax[0].tick_params(axis='y', which='major', pad=10)
# ax[0].set_xlim([0, 3])
ax[0,0].set_ylim([-7, 9])
ax[0,1].set_ylim([-7, 9])
ax[1,0].set_ylim([-2, 2.5])
ax[1,1].set_ylim([-2, 2.5])
ax[0, 0].set_xlim([0, horizon])
ax[0, 1].set_xlim([0, horizon])
ax[1, 0].set_xlim([0, horizon])
ax[1, 1].set_xlim([0, horizon])
ax[0, 0].set_xticks(np.arange(0,horizon+1,5))
ax[0, 1].set_xticks(np.arange(0,horizon+1,5))
plt.setp(ax[0,1].get_yticklabels(), visible=False)
plt.setp(ax[1,1].get_yticklabels(), visible=False)
plt.setp(ax[0,0].get_xticklabels(), visible=False)
plt.setp(ax[0,1].get_xticklabels(), visible=False)
plt.text(-6.81, 6.2, r'$u_{max}$', fontsize=25, fontweight="bold", color='black')
plt.text(-6.81, 4.0, r'$u_{min}$', fontsize=25, fontweight="bold", color='black')
plt.text(-6.81, 0.8, r'$x_{max}$', fontsize=25, fontweight="bold", color='black')
plt.text(-6.81, -1.3, r'$x_{min}$', fontsize=25, fontweight="bold", color='black')
ax[1,0].set_xticks(np.arange(0,horizon+1,5))
ax[1,1].set_xticks(np.arange(0,horizon+1,5))
ax[1,0].set_xlabel(r'Time $t$')
ax[1,1].set_xlabel(r'Time $t$')
# ax[1].tick_params(axis='x', which='major', pad=10)
# ax[1,0].set_ylim([-2, 3])
# ax[1,1].set_ylim([-2, 3])
# ax[1].plot(time_grid, max_u * np.ones_like(time_grid), '--', linewidth=3, color='red')
# ax[1].plot(time_grid, -max_u * np.ones_like(time_grid), '--', linewidth=3, color='red')
ax[0,0].set_title(r'Safe PDP, $\epsilon=10^{-2}$', pad=15)
ax[0,1].set_title('ALTRO', pad=15)
ax[0,0].grid(alpha=0.5)
ax[0,1].grid(alpha=0.5)
ax[1, 0].grid(alpha=0.5)
ax[1, 1].grid(alpha=0.5)
#
plt.subplots_adjust(left=0.10, right=0.98, bottom=0.15, top=0.89, wspace=0.30, hspace=0.2)
plt.show()
# ------------Plot multiple trials of the safe PO results-----------------------------
if False:
# load safe motion planning results
params = {'axes.labelsize': 28,
'axes.titlesize': 28,
'xtick.labelsize': 22,
'ytick.labelsize': 22,
'legend.fontsize': 16}
plt.rcParams.update(params)
loss_trace_list = []
for j in range(1, 2):
load = np.load('./SPlan_Cartpole_trial_' + str(j) + '.npy', allow_pickle=True).item()
safe_loss_trace = load['loss_trace']
loss_trace_list += [safe_loss_trace]
# plot
fig = plt.figure(0, figsize=(5.5, 5.5))
ax = fig.subplots(1, 1)
for loss_trace in loss_trace_list:
ax.plot(loss_trace, color=[0.6350, 0.0780, 0.1840], linewidth=4, )
ax.set_xlim(0, 2000)
# ax.set_ylim(100, 300)
# ax.tick_params(axis='x', which='major', pad=10)
# ax.tick_params(axis='y', which='major', pad=10)
ax.set_xlabel('Iteration', labelpad=0)
ax.set_ylabel('Planning loss', labelpad=0)
ax.set_facecolor('#E6E6E6')
ax.grid()
ax.set_position([0.19, 0.13, 0.73, 0.81])
# ax.set_title('Convergence of Safe PDP', pad=25)
ax.set_xticks(np.arange(0, 2001, 500))
plt.show()
# ------------Plot the results of the PDP under different gamma (barrier parameter)-----------------
if True:
# load safe policy optimization results
params = {'axes.labelsize': 28,
'axes.titlesize': 28,
'xtick.labelsize': 22,
'ytick.labelsize': 22,
'legend.fontsize': 16}
plt.rcParams.update(params)
loss_trace_list = []
for j in range(0, 3):
load = np.load('./SPlan_Cartpole_trial_' + str(j) + '.npy', allow_pickle=True).item()
safe_loss_trace = load['loss_trace']
loss_trace_list += [safe_loss_trace]
print(load['gamma'])
# plot
fig = plt.figure(0, figsize=(5.5, 5.5))
ax = fig.subplots(1, 1)
gamma_0,= ax.plot(loss_trace_list[0], color='tab:green', linewidth=4, )
gamma_1,= ax.plot(loss_trace_list[1], color='tab:brown', linewidth=4, )
gamma_2,= ax.plot(loss_trace_list[2], color='tab:red', linewidth=4, )
ax.legend([gamma_0, gamma_1, gamma_2],
[r'$\epsilon=1$', r'$\epsilon=10^{-1}$', r'$\epsilon=10^{-2}$', ], ncol=1, prop={'size': 25}, columnspacing=0.5, handlelength=1).set_zorder(-102)
ax.set_xlim(0, 3000)
# ax.set_ylim(100, 300)
ax.set_xlabel('Iteration', labelpad=0)
ax.set_ylabel('Loss (planning loss)', labelpad=0)
ax.set_facecolor('#E6E6E6')
ax.grid()
ax.set_position([0.21, 0.13, 0.72, 0.78])
# ax.set_title('Convergence of Safe PDP', pad=25)
ax.set_xticks(np.arange(0, 3001, 1000))
plt.show()
|
461543
|
import torch
import torchtestcase
import unittest
import copy
from survae.nn.layers.autoregressive import MaskedLinear
from survae.nn.nets.autoregressive import MADE, AgnosticMADE
class MADETest(torchtestcase.TorchTestCase):
def test_shape(self):
batch_size = 16
features = 10
hidden_features = 5*[50]
num_params = 3
inputs = torch.randn(batch_size, features)
for random_order, random_mask in [(False, False),
(False, True),
(True, False),
(True, True)]:
with self.subTest(random_order=random_order,
random_mask=random_mask):
model = MADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
random_order=random_order,
random_mask=random_mask,
)
outputs = model(inputs)
self.assertEqual(outputs.dim(), 3)
self.assertEqual(outputs.shape[0], batch_size)
self.assertEqual(outputs.shape[1], features)
self.assertEqual(outputs.shape[2], num_params)
def test_total_mask_sequential(self):
features = 10
hidden_features = 5*[50]
num_params = 1
model = MADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
random_order=False,
random_mask=False,
)
total_mask = None
for module in model.modules():
if isinstance(module, MaskedLinear):
if total_mask is None:
total_mask = module.mask
else:
total_mask = module.mask @ total_mask
total_mask = (total_mask > 0).float()
reference = torch.tril(torch.ones([features, features]), -1)
self.assertEqual(total_mask, reference)
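    # Hedged note (mine, not from the original tests): multiplying the per-layer
    # masks gives the end-to-end connectivity pattern, so entry (i, j) of
    # total_mask is nonzero iff output i can depend on input j. A strictly
    # lower-triangular total mask is exactly the sequential autoregressive
    # property: output i only sees inputs 0..i-1.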
def test_total_mask_random(self):
features = 10
hidden_features = 5*[50]
num_params = 1
model = MADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
random_order=False,
random_mask=True,
)
total_mask = None
for module in model.modules():
if isinstance(module, MaskedLinear):
if total_mask is None:
total_mask = module.mask
else:
total_mask = module.mask @ total_mask
total_mask = (total_mask > 0).float()
self.assertEqual(torch.triu(total_mask), torch.zeros([features, features]))
def test_autoregressive_type_A(self):
batch_size = 16
features = 10
hidden_features = 2*[50]
num_params = 3
x = torch.randn(batch_size, features)
x_altered = copy.deepcopy(x)
x_altered[:,2] += 100.0 # Alter feature number 2
for random_mask in [True, False]:
with self.subTest(random_mask=random_mask):
module = MADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
random_order=False,
random_mask=random_mask,
)
y = module(x)
y_altered = module(x_altered)
# Assert all elements up to (and including) 2 are unaltered
self.assertEqual(y[:,:3], y_altered[:,:3])
# Assert all elements from 2 are altered
self.assertFalse((y[:,3:] == y_altered[:,3:]).view(-1).all())
class AgnosticMADETest(torchtestcase.TorchTestCase):
def test_shape(self):
batch_size = 16
features = 10
hidden_features = 5*[50]
num_params = 3
inputs = torch.randn(batch_size, features)
for order_agnostic, connect_agnostic in [(False, False),
(False, True),
(True, False),
(True, True)]:
with self.subTest(order_agnostic=order_agnostic,
connect_agnostic=connect_agnostic):
model = AgnosticMADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
order_agnostic=order_agnostic,
connect_agnostic=connect_agnostic,
num_masks=16,
)
outputs = model(inputs)
self.assertEqual(outputs.dim(), 3)
self.assertEqual(outputs.shape[0], batch_size)
self.assertEqual(outputs.shape[1], features)
self.assertEqual(outputs.shape[2], num_params)
def test_autoregressive_type_A(self):
batch_size = 16
features = 10
hidden_features = 2*[50]
num_params = 3
x = torch.randn(batch_size, features)
x_altered = copy.deepcopy(x)
x_altered[:,2] += 100.0 # Alter feature number 2
for connect_agnostic in [True, False]:
with self.subTest(connect_agnostic=connect_agnostic):
module = AgnosticMADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
order_agnostic=False,
connect_agnostic=connect_agnostic,
num_masks=2,
)
y = module(x) # Call with mask 0, mask updated to 1
_ = module(x) # Call with mask 1, mask updated to 0
y_altered = module(x_altered) # Call with mask 0, mask updated to 1
# Assert all elements up to (and including) 2 are unaltered
self.assertEqual(y[:,:3], y_altered[:,:3])
# Assert all elements from 2 are altered
self.assertFalse((y[:,3:] == y_altered[:,3:]).view(-1).all())
def test_connect_agnostic(self):
batch_size = 16
features = 10
hidden_features = 2*[50]
num_params = 3
x = torch.randn(batch_size, features)
x_altered = copy.deepcopy(x)
x_altered[:,2] += 100.0 # Alter feature number 2
for order_agnostic, connect_agnostic in [(False, True),
(True, False),
(True, True)]:
with self.subTest(order_agnostic=order_agnostic,
connect_agnostic=connect_agnostic):
module = AgnosticMADE(
features=features,
num_params=num_params,
hidden_features=hidden_features,
order_agnostic=order_agnostic,
connect_agnostic=connect_agnostic,
num_masks=2,
)
y = module(x) # Call with mask 0, mask updated to 1
y_mask1 = module(x) # Call with mask 1, mask updated to 0
y_mask0 = module(x) # Call with mask 0, mask updated to 1
# Assert elements same for same mask
self.assertTrue((y == y_mask0).view(-1).all())
# Assert some elements different for different mask
self.assertTrue((y != y_mask1).view(-1).any())
if __name__ == '__main__':
unittest.main()
|
461563
|
import uuid
from office365.sharepoint.fields.field import Field
from office365.sharepoint.fields.field_creation_information import FieldCreationInformation
from office365.sharepoint.fields.field_type import FieldType
from office365.sharepoint.views.view_field_collection import ViewFieldCollection
from tests import create_unique_name
from tests.sharepoint.sharepoint_case import SPTestCase
from office365.sharepoint.changes.change_query import ChangeQuery
from office365.sharepoint.lists.list import List
from office365.sharepoint.lists.list_creation_information import ListCreationInformation
from office365.sharepoint.lists.list_template_type import ListTemplateType
from office365.sharepoint.views.view import View
from office365.sharepoint.views.view_create_information import ViewCreationInformation
class TestSPView(SPTestCase):
target_list = None # type: List
target_view = None # type: View
target_field = None # type: Field
view_fields_count = None
@classmethod
def setUpClass(cls):
super(TestSPView, cls).setUpClass()
cls.target_list = cls.ensure_list(cls.client.web,
ListCreationInformation("Tasks",
None,
ListTemplateType.Tasks)
)
field_info = FieldCreationInformation("TaskComment_" + uuid.uuid4().hex, FieldType.Note)
cls.target_field = cls.target_list.fields.add(field_info).execute_query()
@classmethod
def tearDownClass(cls):
cls.target_list.delete_object().execute_query()
def test1_get_all_views(self):
all_views = self.target_list.views.get().execute_query()
self.assertGreater(len(all_views), 1)
def test2_create_view(self):
view_properties = ViewCreationInformation()
view_properties.Title = create_unique_name("My Tasks")
view_properties.PersonalView = True
view_properties.Query = "<Where><Eq><FieldRef ID='AssignedTo' /><Value " \
"Type='Integer'><UserID/></Value></Eq></Where> "
new_view = self.target_list.views.add(view_properties).execute_query()
self.assertEqual(view_properties.Title, new_view.properties['Title'])
self.__class__.target_view = new_view
def test3_read_view(self):
view_to_read = self.__class__.target_view.get().execute_query()
self.assertIsNotNone(view_to_read.resource_path)
def test4_render_as_html(self):
result = self.__class__.target_view.render_as_html().execute_query()
self.assertIsNotNone(result.value)
def test5_get_default_view_items(self):
view_items = self.target_list.default_view.get_items().get().execute_query()
self.assertIsNotNone(view_items.resource_path)
def test6_get_view_items(self):
view_items = self.__class__.target_view.get_items().get().execute_query()
self.assertIsNotNone(view_items.resource_path)
def test7_update_view(self):
title_updated = self.__class__.target_view.properties["Title"] + "_updated"
view_to_update = self.__class__.target_view
view_to_update.set_property('Title', title_updated).update().execute_query()
result = self.target_list.views.filter("Title eq '{0}'".format(title_updated)).get().execute_query()
self.assertEqual(len(result), 1)
def test8_get_view_fields(self):
view = self.__class__.target_view.expand(["ViewFields"]).get().execute_query()
self.assertIsNotNone(view.view_fields)
self.assertIsInstance(view.view_fields, ViewFieldCollection)
self.__class__.view_fields_count = len(view.view_fields)
def test9_add_view_field(self):
field_name = self.__class__.target_field.internal_name
self.__class__.target_view.view_fields.add_view_field(field_name).execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(self.__class__.view_fields_count + 1, len(after_view_fields))
def test_10_move_view_field_to(self):
field_name = self.__class__.target_field.internal_name
self.__class__.target_view.view_fields.move_view_field_to(field_name, 2).execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(after_view_fields[2], field_name)
def test_11_remove_view_field(self):
field_name = self.__class__.target_field.internal_name
self.__class__.target_view.view_fields.remove_view_field(field_name).execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(self.__class__.view_fields_count, len(after_view_fields))
def test_12_remove_all_view_fields(self):
self.__class__.target_view.view_fields.remove_all_view_fields().execute_query()
after_view_fields = self.__class__.target_view.view_fields.get().execute_query()
self.assertEqual(0, len(after_view_fields))
def test_13_get_view_changes(self):
changes = self.client.site.get_changes(ChangeQuery(view=True)).execute_query()
self.assertGreater(len(changes), 0)
def test_14_delete_view(self):
view_to_delete = self.__class__.target_view
view_to_delete.delete_object().execute_query()
|
461615
|
def is_a_url(path):
return (path is not None and isinstance(path, str) and
(path.startswith('http://') or
path.startswith('https://'))
)
def tabular(descriptor):
return 'schema' in descriptor
def streaming(descriptor):
return descriptor.get(PROP_STREAMING)
def streamable(descriptor):
return PROP_STREAMED_FROM in descriptor and \
not streaming(descriptor)
def get_path(descriptor):
path = descriptor.get('path')
if isinstance(path, str):
return path
if isinstance(path, list):
if len(path) > 0:
return path.pop(0)
else:
return None
assert path is None, '%r' % path
return None
PATH_PLACEHOLDER = '_'
PROP_STREAMED_FROM = 'dpp:streamedFrom'
PROP_STREAMING = 'dpp:streaming'
|
461626
|
import json
from .oauth import OAuth2Test
from social_core.backends.paypal import PayPalOAuth2
class PayPalOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.paypal.PayPalOAuth2'
user_data_url = (
'https://api.paypal.com/v1/identity/oauth2/userinfo?schema=paypalv1.1'
)
expected_username = 'm<PASSWORD>HdPxJRrhGHrnMJ-1PQKtX6<PASSWORD>'
access_token_body = json.dumps(
{
'token_type': 'Bearer',
'expires_in': 28800,
'refresh_token': '<PASSWORD>',
'access_token': '<PASSWORD>',
}
)
user_data_body = json.dumps(
{
'user_id': 'https://www.paypal.com/webapps/auth/identity/user/mWq6_1sU85v5EG9yHdPxJRrhGHrnMJ-1PQKtX6pcsmA',
'name': 'identity test',
'given_name': 'identity',
'family_name': 'test',
'payer_id': 'WDJJHEBZ4X2LY',
'address': {
'street_address': '1 Main St',
'locality': 'San Jose',
'region': 'CA',
'postal_code': '95131',
'country': 'US',
},
'verified_account': True,
'emails': [{'value': '<EMAIL>', 'primary': True}],
}
)
refresh_token_body = json.dumps(
{
'access_token': '<PASSWORD>',
'token_type': 'Bearer',
'refresh_token': '<PASSWORD>',
'expires_in': 28800,
}
)
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
def test_refresh_token(self):
user, social = self.do_refresh_token()
self.assertEqual(user.username, self.expected_username)
self.assertEqual(social.extra_data['access_token'], 'foobar-new-token')
def test_get_email_no_emails(self):
emails = []
email = PayPalOAuth2.get_email(emails)
self.assertEqual(email, '')
def test_get_email_multiple_emails(self):
expected_email = '<EMAIL>'
emails = [
{'value': '<EMAIL>', 'primary': False},
{'value': expected_email, 'primary': True},
]
email = PayPalOAuth2.get_email(emails)
self.assertEqual(email, expected_email)
def test_get_email_multiple_emails_no_primary(self):
expected_email = '<EMAIL>'
emails = [
{'value': expected_email, 'primary': False},
{'value': '<EMAIL>', 'primary': False},
]
email = PayPalOAuth2.get_email(emails)
self.assertEqual(email, expected_email)
|
461660
|
from office365.sharepoint.base_entity import BaseEntity
class AppPrincipalIdentityProvider(BaseEntity):
pass
|
461697
|
from stix_shifter_utils.stix_transmission.utils.RestApiClient import RestApiClient
import json
import hashlib
class APIClient():
def __init__(self, connection, configuration):
        # Set up the data source API client.
auth_values = configuration.get('auth')
auth = (auth_values['username'], auth_values['password'])
headers = dict()
headers['Accept'] = 'application/json'
connection['host'] = 'data.reversinglabs.com'
url_modifier_function = None
self.client = RestApiClient(host=connection.get('host'),
port=None,
headers=headers,
url_modifier_function=url_modifier_function,
auth=auth
)
self.connection = connection
self.namespace = connection.get('namespace')
def ping_reversinglabs(self):
endpoint = 'api/uri/statistics/uri_state/sha1/4b84b15bff6ee5796152495a230e45e3d7e947d9?format=json'
response = self.client.call_api(endpoint, 'GET')
json_data = json.loads(response.read().decode('utf-8'))
        if response.code == 200:
            status_code = 200
            return json_data, status_code
        return json_data, response.code
def get_search_results(self, query_expression, range_start=None, range_end=None):
# Return the search results. Results must be in JSON format before being translated into STIX
# query_expression = (json.loads(query_expression))
data_type = query_expression['dataType']
data = query_expression['data']
uri = get_uri_sha1(data)
if data_type == 'ip' or data_type == 'domain':
endpoint_uri_state = f'api/uri/statistics/uri_state/sha1/{uri}?format=json'
uri_state = self.client.call_api(endpoint_uri_state, 'GET')
json_data_uri_state = json.loads(uri_state.read().decode('utf-8')) if uri_state.code == 200 else {}
if uri_state.code == 200:
json_data_uri_state['namespace'] = self.namespace
return json_data_uri_state, uri_state.code
            else:
                # Read the response body once; a second read() would return an empty string
                error_text = uri_state.read().decode('utf-8')
                json_data_uri_state['error'] = error_text
                json_data_uri_state['code'] = uri_state.code
                json_data_uri_state['indicator_types'] = ["unknown"]
                json_data_uri_state['description'] = error_text
                json_data_uri_state['namespace'] = self.namespace
                return json_data_uri_state, uri_state.code
elif data_type == 'url':
post_body = json.dumps({
"rl": {
"query": {
"url": data,
"response_format": "json"
}
}
})
endpoint_url = 'api/networking/url/v1/report/query/json'
url_response = self.client.call_api(endpoint_url, 'POST', data = post_body)
json_data_url = json.loads(url_response.read().decode('utf-8')) if url_response.code == 200 else {}
if url_response.code == 200:
status_code = 200
json_data_url['namespace'] = self.namespace
return json_data_url, status_code
            else:
                error_text = url_response.read().decode('utf-8')
                json_data_url['error'] = error_text
                json_data_url['code'] = url_response.code
                json_data_url['indicator_types'] = ["unknown"]
                json_data_url['description'] = error_text
                json_data_url['namespace'] = self.namespace
                return json_data_url, url_response.code
elif data_type == 'hash':
HASH_LENGTH = {'40': 'sha1', '64': 'sha256', '32': 'md5'}
hash_type = HASH_LENGTH.get(str(len(data)), '')
endpoint_malware_presence = f'api/databrowser/malware_presence/query/{hash_type}/{data}?format=json&extended=true'
malware_presence = self.client.call_api(endpoint_malware_presence, 'GET')
json_data_malware_presence = json.loads(malware_presence.read().decode('utf-8')) if malware_presence.code == 200 else {}
if malware_presence.code == 200:
status_code = 200
json_data_malware_presence['namespace'] = self.namespace
return json_data_malware_presence, status_code
            else:
                error_text = malware_presence.read().decode('utf-8')
                json_data_malware_presence['error'] = error_text
                json_data_malware_presence['code'] = malware_presence.code
                json_data_malware_presence['indicator_types'] = ["unknown"]
                json_data_malware_presence['description'] = error_text
                json_data_malware_presence['namespace'] = self.namespace
                return json_data_malware_presence, malware_presence.code
else:
return {"code": 401, "error": "IoC Type not supported"}
# return response
def delete_search(self, search_id):
# Optional since this may not be supported by the data source API
# Delete the search
return {"code": 200, "success": True}
def get_uri_sha1(uri):
uri_bytes = bytes(uri, "utf-8")
hash_object = hashlib.sha1(uri_bytes)
uri_hash = hash_object.hexdigest()
return uri_hash
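# Hedged usage sketch (inferred from the class above; the credentials and the
# namespace value are placeholders, not real ones):
#   client = APIClient({'namespace': 'example'},
#                      {'auth': {'username': 'user', 'password': 'pass'}})
#   results, code = client.get_search_results(
#       {'dataType': 'hash', 'data': '4b84b15bff6ee5796152495a230e45e3d7e947d9'})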
|
461710
|
import json
import os
import numpy as np
import torch
from PIL import Image
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tqdm import tqdm
from utils.utils import cvtColor, preprocess_input, resize_image
from yolo import YOLO
#---------------------------------------------------------------------------#
# map_mode specifies what this script computes when it is run
# map_mode = 0: run the whole mAP pipeline, i.e. obtain prediction results and compute the mAP.
# map_mode = 1: only obtain the prediction results.
# map_mode = 2: only compute the mAP.
#---------------------------------------------------------------------------#
map_mode = 0
#-------------------------------------------------------#
# Paths to the validation-set annotations and images
#-------------------------------------------------------#
cocoGt_path = 'coco_dataset/annotations/instances_val2017.json'
dataset_img_path = 'coco_dataset/val2017'
#-------------------------------------------------------#
# Output folder for the results, map_out by default
#-------------------------------------------------------#
temp_save_path = 'map_out/coco_eval'
class mAP_YOLO(YOLO):
#---------------------------------------------------#
    # Detect objects in a single image
#---------------------------------------------------#
def detect_image(self, image_id, image, results):
#---------------------------------------------------#
        # Compute the height and width of the input image
#---------------------------------------------------#
image_shape = np.array(np.shape(image)[0:2])
#---------------------------------------------------------#
        # Convert the image to RGB here to prevent errors when predicting on grayscale images.
        # The code only supports prediction on RGB images; all other image types are converted to RGB.
#---------------------------------------------------------#
image = cvtColor(image)
#---------------------------------------------------------#
        # Pad the image with gray bars for a distortion-free resize.
        # A plain resize could also be used for detection instead.
#---------------------------------------------------------#
image_data = resize_image(image, (self.input_shape[1],self.input_shape[0]), self.letterbox_image)
#---------------------------------------------------------#
        # Add the batch_size dimension
#---------------------------------------------------------#
image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, dtype='float32')), (2, 0, 1)), 0)
with torch.no_grad():
images = torch.from_numpy(image_data)
if self.cuda:
images = images.cuda()
#---------------------------------------------------------#
            # Feed the image into the network to make predictions!
#---------------------------------------------------------#
outputs = self.net(images)
outputs = self.bbox_util.decode_box(outputs)
#---------------------------------------------------------#
            # Stack the predicted boxes, then apply non-maximum suppression
#---------------------------------------------------------#
outputs = self.bbox_util.non_max_suppression(torch.cat(outputs, 1), self.num_classes, self.input_shape,
image_shape, self.letterbox_image, conf_thres = self.confidence, nms_thres = self.nms_iou)
if outputs[0] is None:
return results
top_label = np.array(outputs[0][:, 6], dtype = 'int32')
top_conf = outputs[0][:, 4] * outputs[0][:, 5]
top_boxes = outputs[0][:, :4]
for i, c in enumerate(top_label):
result = {}
top, left, bottom, right = top_boxes[i]
result["image_id"] = int(image_id)
result["category_id"] = clsid2catid[c]
result["bbox"] = [float(left),float(top),float(right-left),float(bottom-top)]
result["score"] = float(top_conf[i])
results.append(result)
return results
if __name__ == "__main__":
if not os.path.exists(temp_save_path):
os.makedirs(temp_save_path)
cocoGt = COCO(cocoGt_path)
ids = list(cocoGt.imgToAnns.keys())
clsid2catid = cocoGt.getCatIds()
if map_mode == 0 or map_mode == 1:
yolo = mAP_YOLO(confidence = 0.001, nms_iou = 0.65)
with open(os.path.join(temp_save_path, 'eval_results.json'),"w") as f:
results = []
for image_id in tqdm(ids):
image_path = os.path.join(dataset_img_path, cocoGt.loadImgs(image_id)[0]['file_name'])
image = Image.open(image_path)
results = yolo.detect_image(image_id, image, results)
json.dump(results, f)
if map_mode == 0 or map_mode == 2:
cocoDt = cocoGt.loadRes(os.path.join(temp_save_path, 'eval_results.json'))
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
print("Get map done.")
|
461752
|
r"""
NI IO Trace can be used to troubleshoot & debug the setup. It should be installed
when the NI-DAQmx driver is installed.
PyDAQmx parses the NIDAQmx.h header to build ctypes wrappers for all functions,
constants, etc. It also wraps the functions which return error codes to raise
exceptions (and warnings) based on the return value.
https://www.ni.com/en-au/support/downloads/drivers/download.ni-daqmx.html#409845
API Reference manual:
https://zone.ni.com/reference/en-XX/help/370471AM-01/
C:\Program Files (x86)\National Instruments\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h
C:\Program Files\National Instruments\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h
"""
from collections import namedtuple
from fixate.core.common import ExcThread
from queue import Queue, Empty
from ctypes import byref, c_char_p
import numpy
# Basic Functions
from PyDAQmx import (
DAQmxResetDevice,
TaskHandle,
int32,
uInt8,
float64,
uInt64,
uInt32,
)
# Tasks
from PyDAQmx import (
DAQmxCreateTask,
DAQmxStartTask,
DAQmxWaitUntilTaskDone,
DAQmxStopTask,
DAQmxClearTask,
)
# Channels
from PyDAQmx import (
DAQmxCreateDOChan,
DAQmxCreateDIChan,
DAQmxReadDigitalLines,
DAQmxWriteDigitalLines,
DAQmx_Val_GroupByScanNumber,
DAQmx_Val_ChanPerLine,
DAQmxReadCounterScalarF64,
DAQmx_Val_Rising,
DAQmx_Val_Seconds,
DAQmxCfgSampClkTiming,
DAQmx_Val_FiniteSamps,
)
# Two Edge Separation
from PyDAQmx import (
DAQmxCreateCITwoEdgeSepChan,
DAQmxSetCITwoEdgeSepFirstTerm,
DAQmxGetCITwoEdgeSepFirstTerm,
DAQmxSetCITwoEdgeSepSecondTerm,
DAQmxGetCITwoEdgeSepSecondTerm,
DAQmx_Val_Falling,
)
# Signal Routing
from PyDAQmx import (
DAQmxConnectTerms,
DAQmxDisconnectTerms,
DAQmxTristateOutputTerm,
DAQmx_Val_InvertPolarity,
DAQmx_Val_DoNotInvertPolarity,
)
from fixate.core.exceptions import InstrumentError, ParameterError
IORange = namedtuple("IORange", ["port", "range_start", "range_end"])
IORange.__new__.__defaults__ = (0, None, None)
IOLine = namedtuple("IOLine", ["port", "line"])
IOLine.__new__.__defaults__ = (0, None)
class DaqTask:
""" """
task_state = ""
task = None
def read(self):
raise NotImplementedError("Read not available for this Task")
def write(self, data):
raise NotImplementedError("Write not available for this Task")
def trigger(self):
raise NotImplementedError("Trigger not available for this Task")
def init(self):
"""
This method should be overridden to create the task
:return:
"""
def stop(self):
if self.task_state == "running":
DAQmxStopTask(self.task)
self.task_state = "stopped"
def clear(self):
self.stop()
if self.task_state != "":
DAQmxClearTask(self.task)
self.task = None
self.task_state = ""
def start(self):
if self.task_state == "running":
return
if self.task_state == "":
self.init()
DAQmxStartTask(self.task)
self.task_state = "running"
class DigitalOut(DaqTask):
""" """
def __init__(self, task_string, io_length):
self.io_length = io_length
self.task_string = task_string
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateDOChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
self.task_state = "init"
if self.task_state in ["init", "stopped"]:
self.start()
def read(self):
self.init()
data_arr = numpy.zeros(self.io_length, uInt8)
samples_per_chan = int32()
num_bytes_per_sample = int32()
DAQmxReadDigitalLines(
self.task,
1, # Samples per channel
2.0, # Timeout
DAQmx_Val_GroupByScanNumber, # Interleaved
data_arr,
len(data_arr),
byref(samples_per_chan),
byref(num_bytes_per_sample),
None,
)
return data_arr
def write(self, data):
"""
Data must be an iterable like a list of 1s and 0s
Data is grouped by scan number. Each element in the array will write to each line in the digital output until
exhausted and then will start from the beginning for the next sample. Sample rate is as set in creating the IO
task.
"""
self.init()
try:
if len(data) % self.io_length:
raise ValueError(
"data must be a length divisible by {}".format(self.io_length)
)
data_arr = numpy.zeros(len(data), uInt8)
data_arr[:] = data
except TypeError:
if self.io_length != 1:
raise ValueError(
"data must be a list of length divisible by {}".format(
self.io_length
)
)
data_arr = numpy.zeros(1, uInt8)
data_arr[:] = [data]
written = int32()
DAQmxWriteDigitalLines(
self.task,
len(data_arr) // self.io_length, # Samples per channel
1, # Autostart task
2.0, # Timeout
DAQmx_Val_GroupByScanNumber, # Interleaved
data_arr,
written,
None,
)
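    # Hedged illustration of the interleaving described in write() above
    # (the port/line mapping is an assumption for the example):
    #   a task built over port0/line2:4 has io_length == 3, so
    #   write([1, 0, 1, 0, 1, 0]) writes two samples per line:
    #   (line2, line3, line4) = (1, 0, 1) first, then (0, 1, 0).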
class DigitalIn(DaqTask):
""" """
def __init__(self, task_string, io_length):
self.io_length = io_length
self.task_string = task_string
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateDIChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
self.task_state = "init"
if self.task_state in ["init", "stopped"]:
self.start()
def read(self):
self.init()
data_arr = numpy.zeros(self.io_length, uInt8)
samples_per_chan = int32()
num_bytes_per_sample = int32()
DAQmxReadDigitalLines(
self.task,
1, # Samples per channel
2.0, # Timeout
DAQmx_Val_GroupByScanNumber, # Interleaved
data_arr,
len(data_arr),
byref(samples_per_chan),
byref(num_bytes_per_sample),
None,
)
return data_arr
class BufferedWrite(DaqTask):
""" """
def __init__(self, task_string, io_length, frequency):
self.task_string = task_string
self.io_length = io_length
self.frequency = frequency
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateDOChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
self.task_state = "init"
def write(self, data):
"""
The task should be in stopped state when calling write, it automatically starts the task through the
DAQmxWriteDigitalLines call. When write is finished it is back in a stopped state
:param data:
:return:
"""
self.init()
try:
if len(data) % self.io_length:
raise ValueError(
"data must be a length divisible by {}".format(self.io_length)
)
except TypeError as e:
raise ValueError(
"data must be in an list divisible by {}".format(self.io_length)
) from e
if len(data) == self.io_length:
# Sample clock only works for more than one sample so duplicate the sample
data = list(data)
data.extend(data)
DAQmxCfgSampClkTiming(
self.task,
None,
float64(self.frequency),
DAQmx_Val_Rising,
DAQmx_Val_FiniteSamps,
uInt64(int(len(data) // self.io_length)),
)
try:
data_arr = numpy.zeros((len(data)), uInt8)
data_arr[:] = data
written = int32()
DAQmxWriteDigitalLines(
self.task,
int(len(data) // self.io_length),
1,
-1,
DAQmx_Val_GroupByScanNumber,
data_arr,
written,
None,
)
self.task_state = "running"
DAQmxWaitUntilTaskDone(self.task, -1)
if written.value != len(data) // self.io_length:
raise InstrumentError("Values not written correctly")
finally:
self.stop()
class TwoEdgeSeparation(DaqTask):
_data = float64()
_trigger_thread = None
def __init__(
self,
device_name,
counter_chan,
min_val,
max_val,
first_edge_type,
second_edge_type,
source_terminal,
destination_terminal,
):
self.device_name = device_name
self.counter_chan = counter_chan
self.min_val = min_val
self.max_val = max_val
self.first_edge_type = first_edge_type
self.second_edge_type = second_edge_type
self.source_terminal = source_terminal
self.destination_terminal = destination_terminal
self._error_queue = Queue()
self._thread_timeout = 10
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateCITwoEdgeSepChan(
self.task,
"{}/{}".format(self.device_name, self.counter_chan).encode(),
b"",
float64(self.min_val),
float64(self.max_val),
DAQmx_Val_Seconds,
self.first_edge_type,
self.second_edge_type,
b"",
)
if self.source_terminal:
tmp_data = c_char_p(self.source_terminal.encode())
DAQmxSetCITwoEdgeSepFirstTerm(
self.task,
"{}/{}".format(self.device_name, self.counter_chan).encode(),
tmp_data,
)
if self.destination_terminal:
tmp_data = c_char_p(self.destination_terminal.encode())
DAQmxSetCITwoEdgeSepSecondTerm(
self.task,
"{}/{}".format(self.device_name, self.counter_chan).encode(),
tmp_data,
)
self.task_state = "init"
def read(self):
self._trigger_thread.join(self._thread_timeout)
if self._trigger_thread.is_alive():
raise InstrumentError("Trigger thread failed to terminate")
try:
err = self._error_queue.get_nowait()
except Empty:
# no error in queue
pass
else:
raise err
# TODO: consider making this return self._data.value. We should return a python
# float object, not a ctypes.c_double
return self._data
def _read(self):
try:
DAQmxReadCounterScalarF64(
self.task, float64(self._thread_timeout), byref(self._data), None
)
except Exception as e:
self._error_queue.put(ThreadError(e))
return
def trigger(self):
if self._trigger_thread:
self.clear()
self._trigger_thread.join(self._thread_timeout)
if self._trigger_thread.is_alive():
raise InstrumentError("Existing Trigger Event in Progress")
self.init()
self._trigger_thread = ExcThread(target=self._read)
self._trigger_thread.start()
class ThreadError(Exception):
"""
give a name to an error that came from a thread
"""
pass
class DaqMx:
"""
Implements the digital input and output functions of the National Instruments DAQ
usage:
daq = DaqMx()
# Create a digital output from port 0 line 2 to line 4 named 'P0.2:4'
daq.create_digital_output('P0.2:4', port=0, range_start=2, length=3)
# Create a digital output with default port 0, at line 7 named 'reset'
daq.create_digital_output('reset', 7)
# Create a digital input at port 0 line 1
daq.create_digital_input('P0.1', range_start=1)
# This example assumes that port 0 line 1 is shorted to port 0 line 7 named reset
daq.start()
print("Port 7:", daq["reset"], "Echo Port:", daq["P0.1"])
>>>'Port 7: [0] Echo Port: [0]'
daq["P0.7"] = 1 # or True or '1' or [1]
print("Port 7:", daq["reset"], "Echo Port:", daq["P0.1"])
>>>'Port 7: [1] Echo Port: [1]'
print(daq["P0.2:4"])
>>>'[0 0 0]'
daq["P0.2:4"] = [0, 1, 0] # Need to assign all values if initialised as multiple
print(daq["P0.2:4"])
>>>'[0 1 0]'
daq.stop()
"""
def __init__(self):
self.device_name = "Dev1"
self.tasks = {}
self.reset()
self.triggers = {}
def reset(self):
DAQmxResetDevice(self.device_name.encode())
for _, task in self.tasks.items():
task.task_state = ""
def signal_route(
self,
source_terminal,
destination_terminal,
disconnect=False,
tri_state=False,
invert=False,
):
"""
Immediately routes a signal between two terminals
Set destination_terminal to '' if tri_state output is required on the source_terminal
terminals are PFI X as they are the programmable terminals.
See NI-MAX Device Routes for available terminal names.
Leave out the device name
eg. /Dev 1/PFI0 would be PFI0
"""
source_terminal = "/{}/{}".format(self.device_name, source_terminal).encode()
destination_terminal = "/{}/{}".format(
self.device_name, destination_terminal
).encode()
if disconnect:
DAQmxDisconnectTerms(source_terminal, destination_terminal)
elif tri_state:
DAQmxTristateOutputTerm(source_terminal)
else:
if invert:
invert = DAQmx_Val_InvertPolarity
else:
invert = DAQmx_Val_DoNotInvertPolarity
DAQmxConnectTerms(source_terminal, destination_terminal, invert)
def create_two_edge_separation(
self,
ident,
counter_chan,
min_val,
max_val,
first_edge_type,
second_edge_type,
source_terminal=None,
destination_terminal=None,
):
"""
Returns the two edge separation of two signals
:param ident:
Identification string used for reading the data via
daq = DaqMx()
daq.create_two_edge_separation(ident, **params)
daq.trigger_measurement(ident)
# Do stuff
# Read the edge separation after causing the event
edge_sep = daq[ident]
:param counter_chan:
For X-Series DAQs PCI
'ctr0', 'ctr1', 'ctr2', 'ctr3' where the connected terminals are:
Start = "AUX", Stop = "GATE"
                     ctr0             ctr1             ctr2            ctr3
            Start:   PFI 10 (Pin45)   PFI 11 (Pin46)   PFI 2 (Pin43)   PFI 7 (Pin38)
            Stop:    PFI 9 (Pin3)     PFI 4 (Pin41)    PFI 1 (Pin10)   PFI 6 (Pin5)
:param min_val:
The minimum value, in units, that you expect to measure.
eg. 0.0001
:param max_val:
The maximum value, in units, that you expect to measure.
eg. 0.83
:param first_edge_type:
The start trigger on the first edge
"rising" or "falling"
:param second_edge_type:
The stop trigger on the second edge
"rising" or "falling"
:param source_terminal
:param destination_terminal
Override the default counter terminals.
            e.g. for ctr0, source_terminal = "PFI14" will make the Start pin PFI 14 instead of PFI 10
"""
if counter_chan not in ["ctr0", "ctr1", "ctr2", "ctr3"]:
raise ValueError("Invalid counter channel selected")
if first_edge_type.lower() == "falling":
first_edge_type = DAQmx_Val_Falling
else:
first_edge_type = DAQmx_Val_Rising
if second_edge_type.lower() == "falling":
second_edge_type = DAQmx_Val_Falling
else:
second_edge_type = DAQmx_Val_Rising
self.tasks[ident] = TwoEdgeSeparation(
self.device_name,
counter_chan,
min_val,
max_val,
first_edge_type,
second_edge_type,
source_terminal,
destination_terminal,
)
def trigger_measurement(self, ident):
try:
self.tasks[ident].trigger()
except KeyError as e:
raise ValueError("{} is not a valid task".format(ident)) from e
def create_buffered_write(self, ident, frequency, *dio_ranges):
"""
Sets up the ranges to synchronize when writing to output at a specified frequency.
This will force each write to the output for this ident to contain the amount of samples specified.
eg.
>>>daq = DaqMx()
# Setup output @ 100Hz, 3 samples on port0 line 7 and 9
>>>daq.create_buffered_write("MyOutput", 100, (0, 7, 7), (0, 9, 9))
3 samples over 2 lines is 6 data values.
>>>daq["MyOutput"] = [0 ,0, 1, 1, 0, 1]
it is interleaved so it is written [line7, line9, line7, line9, line7, line9]
Requires ports that enable buffered writes.
In the X-Series daq this is port 0
This disables reading from the output port for these pins.
:param ident
The identification used to access this message
:param frequency
The sample frequency for writing
:type frequency integer or float
:param io_ranges
:type (port, line_start, line_end)
:param samples
The amount of samples that are required for each digital output write
"""
if ident in self.tasks:
raise ParameterError("Ident {} already used".format(ident))
do_channel, data_length = self._build_digital_task_string(*dio_ranges)
self.tasks[ident] = BufferedWrite(
task_string=do_channel, io_length=data_length, frequency=frequency
)
def _build_digital_task_string(self, *dio_ranges):
"""
:param dio_ranges:
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
:return:
            The string used to create the task by connecting each of the ports together
"""
data_length = 0
task_arr = []
for rng in dio_ranges:
task_arr.append(self.device_name + "/port{}/line{}:{}".format(*rng))
data_length += rng[2] - rng[1] + 1 # range end - range start + 1
return ", ".join(task_arr).encode(), data_length
def create_digital_output(self, ident, *dio_ranges):
"""
:param dio_ranges
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
A digital output is created in the order of the dio_ranges and can be accessed by the ident key.
>>>daq = DaqMx()
>>>rng_1 = IORange(0, 7, 9) # Port 0 line 7 to line 9
>>>rng_2 = IORange(0, 11,11) # Port 0 line 11
>>>daq.create_digital_output("MyOut", rng_1, rng_2)
>>>daq["MyOut"] = [0, 1, 0, 1] # Port 0 Line 8 and 11 high
>>>print(daq["MyOut"]) # Read back the value
>>>[0, 1, 0, 1]
"""
if ident in self.tasks:
raise ParameterError("Ident {} already used".format(ident))
task_string, data_length = self._build_digital_task_string(*dio_ranges)
self.tasks[ident] = DigitalOut(task_string, io_length=data_length)
def create_digital_input(self, ident, *dio_ranges):
"""
:param dio_ranges
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
A digital output is created in the order of the dio_ranges and can be accessed by the ident key.
>>>daq = DaqMx()
>>>rng_1 = IORange(0, 7, 9) # Port 0 line 7 to line 9
>>>rng_2 = IORange(0, 11,11) # Port 0 line 11
>>>daq.create_digital_input("MyOut", rng_1, rng_2)
>>>print(daq["MyOut"]) # Tie Port 0 line 8 and line 11 high
>>>[0, 1, 0, 1]
"""
if ident in self.tasks:
raise ParameterError("Ident {} already used".format(ident))
task_string, data_length = self._build_digital_task_string(*dio_ranges)
self.tasks[ident] = DigitalIn(task_string, io_length=data_length)
def __getitem__(self, ident):
return self.read(ident)
def __setitem__(self, ident, data):
self.write(ident, data)
def write(self, ident, value):
try:
return self.tasks[ident].write(value)
except KeyError:
raise KeyError("{} is not a valid identifier".format(ident))
def read(self, ident):
try:
return self.tasks[ident].read()
except KeyError:
raise KeyError(
"{} is not a valid identifier\nAvailable tasks: {}".format(
ident, sorted(self.tasks)
)
)
def start_task(self, ident):
"""
:param ident:
:return:
"""
self.tasks[ident].start()
def stop_task(self, ident):
"""
        Stops a task so that it can be started again later
:param ident:
:return:
"""
self.tasks[ident].stop()
def clear_task(self, ident):
"""
        Stops a task and clears up the resources allocated to it
:param ident:
:return:
"""
self.tasks[ident].clear()
|
461762
|
import random
import threading
import psutil
def display_cpu():
global running
running = True
currentProcess = psutil.Process()
# start loop
while running:
print(currentProcess.cpu_percent(interval=1))
def start():
global t
# create thread and start it
t = threading.Thread(target=display_cpu)
t.start()
def stop():
global running
global t
# use `running` to stop loop in thread so thread will end
running = False
# wait for thread's end
t.join()
# ---
def i_hate_this():
tab = []
for i in range(1000000):
tab.append(random.randint(1, 10000))
tab.sort()
return tab
# ---
start()
try:
result = i_hate_this()
finally: # stop thread even if I press Ctrl+C
stop()
|
461794
|
import asyncio
from asyncio import Queue
from aioweb3 import AsyncWeb3
async def producer(queue: Queue, jobs: int):
print(f"Enqueue {jobs} jobs")
for index in range(jobs):
await queue.put(index)
async def consumer(queue: Queue, index: int):
def log(*args, **kwargs):
print(f"Worker {index}: ", *args, **kwargs)
log("Boot")
web3 = AsyncWeb3()
while 1:
await queue.get()
block = await web3.eth.get_block("latest")
log("latest block: ", block.number)
queue.task_done()
async def entrypoint(jobs: int = 5_000, workers: int = 50):
queue = Queue()
producers = [asyncio.create_task(producer(queue, jobs))]
consumers = [
asyncio.create_task(consumer(queue, i)) for i in range(workers)
]
await asyncio.gather(*producers)
await queue.join()
def test_workers():
"""Simple test cast to run a bunch of workers in parallel."""
asyncio.run(entrypoint())
|
461805
|
import datetime
import os
import requests
import re
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
class CongresspersonDetails:
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_PATH = os.path.join(BASE_DIR, 'data')
DATE = datetime.date.today().strftime('%Y-%m-%d')
FILE_BASE_NAME = '{}-congressperson-details.xz'.format(DATE)
CAMARA_URL = ('http://www.camara.leg.br/SitCamaraWS/Deputados.asmx/'
'ObterDetalhesDeputado?ideCadastro={}&numLegislatura=')
CSV_PARAMS = {
'compression': 'xz',
'encoding': 'utf-8',
'index': False
}
def __init__(self):
self.total = 0
def find_newest_file(self, name):
        date_regex = re.compile(r'\d{4}-\d{2}-\d{2}')
matches = (date_regex.findall(f) for f in os.listdir(self.DATA_PATH))
dates = sorted(set([l[0] for l in matches if l]), reverse=True)
for date in dates:
filename = '{}-{}.xz'.format(date, name)
filepath = os.path.join(self.DATA_PATH, filename)
if os.path.isfile(filepath):
return filepath
return None
def read_csv(self, name):
newest_file = self.find_newest_file(name)
if newest_file is None:
            msg = 'Could not find the dataset for {}.'.format(name)
raise TypeError(msg)
        return pd.read_csv(newest_file, dtype={'congressperson_id': str})
def get_all_congresspeople_ids(self):
print('Fetching all congresspeople ids...')
datasets = ('current-year', 'last-year', 'previous-years')
ids = (self.read_csv(name)['congressperson_id'] for name in datasets)
distinct_ids = pd.concat(ids).unique()
self.total = len(distinct_ids)
yield from (str(idx).strip() for idx in distinct_ids)
def write_civil_file(self, congressperson_civil_names):
df = pd.DataFrame(data=congressperson_civil_names)
print('Writing file...')
filepath = os.path.join(self.DATA_PATH, self.FILE_BASE_NAME)
df.to_csv(filepath, **self.CSV_PARAMS)
print('Done.')
@staticmethod
def parse_repository(data, congress_id):
soup = BeautifulSoup(data, 'lxml')
civil_name = soup.find('nomecivil').text
birth_date = soup.find('datanascimento').text
birth_date = datetime.datetime.strptime(birth_date, '%d/%m/%Y').date()
gender = soup.find('sexo').text
return {
'congressperson_id': congress_id,
'civil_name': civil_name,
'birth_date': birth_date,
'gender': gender.upper(),
}
def fetch_data_repository(self, congress_id):
url = self.CAMARA_URL.format(congress_id)
page = requests.get(url)
if page.status_code != 200:
msg = 'HTTP request to {} failed with status code {}'
print(msg.format(url, page.status_code))
return
content = str(page.content.decode('utf-8'))
return self.parse_repository(content, congress_id)
def get_civil_names(self):
congresspeople_ids = self.get_all_congresspeople_ids()
for i, congress_id in enumerate(congresspeople_ids):
if not np.math.isnan(float(congress_id)):
percentage = (i / self.total * 100)
msg = 'Processed {} out of {} ({:.2f}%)'
print(msg.format(i, self.total, percentage), end='\r')
data = self.fetch_data_repository(congress_id)
if data is not None:
yield dict(data)
if __name__ == '__main__':
details = CongresspersonDetails()
details.write_civil_file(details.get_civil_names())
|
461807
|
from django.urls import path
from django_cradmin.apps.cradmin_register_account.views import register_account
urlpatterns = [
path('',
register_account.RegisterAccountView.as_view(),
name="cradmin-register-account"),
]
|
461818
|
import info
class subinfo(info.infoclass):
def setTargets(self):
self.svnTargets['master'] = 'https://github.com/win-iconv/win-iconv.git'
for ver in ['0.0.7', '0.0.8']:
self.targets[ver] = 'https://github.com/win-iconv/win-iconv/archive/v%s.tar.gz' % ver
self.archiveNames[ver] = "win-iconv-%s.tar.gz" % ver
self.targetInstSrc[ver] = 'win-iconv-%s' % ver
self.targetDigests['0.0.8'] = (
['23adea990a8303c6e69e32a64a30171efcb1b73824a1c2da1bbf576b0ae7c520'], CraftHash.HashAlgorithm.SHA256)
self.description = "a character set conversion library binary compatible with GNU iconv"
self.defaultTarget = '0.0.8'
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
self.subinfo.shelveAble = False
|
461868
|
import copy
import warnings
from math import sqrt, exp, log, cosh, sinh
import numpy as np
import pytest
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils import check_random_state
from sklearn.covariance import EmpiricalCovariance, LedoitWolf
from nilearn._utils.extmath import is_spd
from nilearn.tests.test_signal import generate_signals
from nilearn.connectome.connectivity_matrices import (
_check_square, _check_spd, _map_eigenvalues, _form_symmetric,
_geometric_mean, sym_matrix_to_vec, vec_to_sym_matrix, prec_to_partial,
ConnectivityMeasure)
from pandas import DataFrame
def grad_geometric_mean(mats, init=None, max_iter=10, tol=1e-7):
"""Return the norm of the covariant derivative at each iteration step of
geometric_mean. See its docstring for details.
    The norm is the intrinsic norm on the tangent space of the manifold of
    symmetric positive definite matrices.
Returns
-------
grad_norm : list of float
Norm of the covariant derivative in the tangent space at each step.
"""
mats = np.array(mats)
# Initialization
if init is None:
gmean = np.mean(mats, axis=0)
else:
gmean = init
norm_old = np.inf
step = 1.
grad_norm = []
for n in range(max_iter):
# Computation of the gradient
vals_gmean, vecs_gmean = linalg.eigh(gmean)
gmean_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_gmean, vecs_gmean)
whitened_mats = [gmean_inv_sqrt.dot(mat).dot(gmean_inv_sqrt)
for mat in mats]
logs = [_map_eigenvalues(np.log, w_mat) for w_mat in whitened_mats]
logs_mean = np.mean(logs, axis=0) # Covariant derivative is
# - gmean.dot(logms_mean)
norm = np.linalg.norm(logs_mean) # Norm of the covariant derivative on
# the tangent space at point gmean
# Update of the minimizer
vals_log, vecs_log = linalg.eigh(logs_mean)
gmean_sqrt = _form_symmetric(np.sqrt, vals_gmean, vecs_gmean)
gmean = gmean_sqrt.dot(
_form_symmetric(np.exp, vals_log * step, vecs_log)).dot(gmean_sqrt)
# Update the norm and the step size
if norm < norm_old:
norm_old = norm
if norm > norm_old:
step = step / 2.
norm = norm_old
grad_norm.append(norm / gmean.size)
if tol is not None and norm / gmean.size < tol:
break
return grad_norm
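# Illustrative sketch (not part of the tests): by construction the values
# returned by grad_geometric_mean are non-increasing, e.g. for an assumed pair
# of SPD matrices
#   norms = grad_geometric_mean([np.eye(3), 2. * np.eye(3)])
#   assert all(a >= b for a, b in zip(norms, norms[1:]))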
def test_check_square():
non_square = np.ones((2, 3))
pytest.raises(ValueError, _check_square, non_square)
def test_check_spd():
non_sym = np.array([[0, 1], [0, 0]])
pytest.raises(ValueError, _check_spd, non_sym)
non_spd = np.ones((3, 3))
pytest.raises(ValueError, _check_spd, non_spd)
def test_map_eigenvalues():
# Test on exp map
sym = np.ones((2, 2))
sym_exp = exp(1.) * np.array([[cosh(1.), sinh(1.)], [sinh(1.), cosh(1.)]])
assert_array_almost_equal(_map_eigenvalues(np.exp, sym), sym_exp)
# Test on sqrt map
spd_sqrt = np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]])
spd = spd_sqrt.dot(spd_sqrt)
assert_array_almost_equal(_map_eigenvalues(np.sqrt, spd), spd_sqrt)
# Test on log map
spd = np.array([[1.25, 0.75], [0.75, 1.25]])
spd_log = np.array([[0., log(2.)], [log(2.), 0.]])
assert_array_almost_equal(_map_eigenvalues(np.log, spd), spd_log)
def test_geometric_mean_couple():
n_features = 7
spd1 = np.ones((n_features, n_features))
spd1 = spd1.dot(spd1) + n_features * np.eye(n_features)
spd2 = np.tril(np.ones((n_features, n_features)))
spd2 = spd2.dot(spd2.T)
vals_spd2, vecs_spd2 = np.linalg.eigh(spd2)
spd2_sqrt = _form_symmetric(np.sqrt, vals_spd2, vecs_spd2)
spd2_inv_sqrt = _form_symmetric(np.sqrt, 1. / vals_spd2, vecs_spd2)
geo = spd2_sqrt.dot(_map_eigenvalues(np.sqrt, spd2_inv_sqrt.dot(spd1).dot(
spd2_inv_sqrt))).dot(spd2_sqrt)
assert_array_almost_equal(_geometric_mean([spd1, spd2]), geo)
def test_geometric_mean_diagonal():
n_matrices = 20
n_features = 5
diags = []
for k in range(n_matrices):
diag = np.eye(n_features)
diag[k % n_features, k % n_features] = 1e4 + k
diag[(n_features - 1) // (k + 1), (n_features - 1) // (k + 1)] = \
(k + 1) * 1e-4
diags.append(diag)
geo = np.prod(np.array(diags), axis=0) ** (1 / float(len(diags)))
assert_array_almost_equal(_geometric_mean(diags), geo)
def test_geometric_mean_geodesic():
n_matrices = 10
n_features = 6
sym = np.arange(n_features) / np.linalg.norm(np.arange(n_features))
sym = sym * sym[:, np.newaxis]
times = np.arange(n_matrices)
non_singular = np.eye(n_features)
non_singular[1:3, 1:3] = np.array([[-1, -.5], [-.5, -1]])
spds = []
for time in times:
spds.append(non_singular.dot(_map_eigenvalues(np.exp, time * sym)).dot(
non_singular.T))
gmean = non_singular.dot(_map_eigenvalues(np.exp, times.mean() * sym)).dot(
non_singular.T)
assert_array_almost_equal(_geometric_mean(spds), gmean)
def random_diagonal(p, v_min=1., v_max=2., random_state=0):
"""Generate a random diagonal matrix.
Parameters
----------
p : int
The first dimension of the array.
v_min : float, optional (default to 1.)
Minimal element.
v_max : float, optional (default to 2.)
Maximal element.
random_state : int or numpy.random.RandomState instance, optional
random number generator, or seed.
Returns
-------
output : numpy.ndarray, shape (p, p)
A diagonal matrix with the given minimal and maximal elements.
"""
random_state = check_random_state(random_state)
diag = random_state.rand(p) * (v_max - v_min) + v_min
diag[diag == np.amax(diag)] = v_max
diag[diag == np.amin(diag)] = v_min
return np.diag(diag)
def random_spd(p, eig_min, cond, random_state=0):
"""Generate a random symmetric positive definite matrix.
Parameters
----------
p : int
The first dimension of the array.
eig_min : float
Minimal eigenvalue.
cond : float
Condition number, defined as the ratio of the maximum eigenvalue to the
minimum one.
random_state : int or numpy.random.RandomState instance, optional
random number generator, or seed.
Returns
-------
output : numpy.ndarray, shape (p, p)
A symmetric positive definite matrix with the given minimal eigenvalue
and condition number.
"""
random_state = check_random_state(random_state)
mat = random_state.randn(p, p)
unitary, _ = linalg.qr(mat)
diag = random_diagonal(p, v_min=eig_min, v_max=cond * eig_min,
random_state=random_state)
return unitary.dot(diag).dot(unitary.T)
def random_non_singular(p, sing_min=1., sing_max=2., random_state=0):
"""Generate a random nonsingular matrix.
Parameters
----------
p : int
The first dimension of the array.
sing_min : float, optional (default to 1.)
Minimal singular value.
sing_max : float, optional (default to 2.)
Maximal singular value.
random_state : int or numpy.random.RandomState instance, optional
random number generator, or seed.
Returns
-------
output : numpy.ndarray, shape (p, p)
A nonsingular matrix with the given minimal and maximal singular
values.
"""
random_state = check_random_state(random_state)
diag = random_diagonal(p, v_min=sing_min, v_max=sing_max,
random_state=random_state)
mat1 = random_state.randn(p, p)
mat2 = random_state.randn(p, p)
unitary1, _ = linalg.qr(mat1)
unitary2, _ = linalg.qr(mat2)
return unitary1.dot(diag).dot(unitary2.T)
def test_geometric_mean_properties():
n_matrices = 40
n_features = 15
spds = []
for k in range(n_matrices):
spds.append(random_spd(n_features, eig_min=1., cond=10.,
random_state=0))
input_spds = copy.copy(spds)
gmean = _geometric_mean(spds)
# Generic
assert isinstance(spds, list)
for spd, input_spd in zip(spds, input_spds):
assert_array_equal(spd, input_spd)
assert(is_spd(gmean, decimal=7))
# Invariance under reordering
spds.reverse()
spds.insert(0, spds[1])
spds.pop(2)
assert_array_almost_equal(_geometric_mean(spds), gmean)
# Invariance under congruent transformation
non_singular = random_non_singular(n_features, random_state=0)
spds_cong = [non_singular.dot(spd).dot(non_singular.T) for spd in spds]
assert_array_almost_equal(_geometric_mean(spds_cong),
non_singular.dot(gmean).dot(non_singular.T))
# Invariance under inversion
spds_inv = [linalg.inv(spd) for spd in spds]
init = linalg.inv(np.mean(spds, axis=0))
assert_array_almost_equal(_geometric_mean(spds_inv, init=init),
linalg.inv(gmean))
# Gradient norm is decreasing
grad_norm = grad_geometric_mean(spds, tol=1e-20)
difference = np.diff(grad_norm)
assert np.amax(difference) <= 0.
# Check warning if gradient norm in the last step is less than
# tolerance
max_iter = 1
tol = 1e-20
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
gmean = _geometric_mean(spds, max_iter=max_iter, tol=tol)
assert len(w) == 1
grad_norm = grad_geometric_mean(spds, max_iter=max_iter, tol=tol)
assert len(grad_norm) == max_iter
assert grad_norm[-1] > tol
# Evaluate convergence. A warning is printed if tolerance is not reached
for p in [.5, 1.]: # proportion of badly conditioned matrices
spds = []
for k in range(int(p * n_matrices)):
spds.append(random_spd(n_features, eig_min=1e-2, cond=1e6,
random_state=0))
for k in range(int(p * n_matrices), n_matrices):
spds.append(random_spd(n_features, eig_min=1., cond=10.,
random_state=0))
if p < 1:
max_iter = 30
else:
max_iter = 60
gmean = _geometric_mean(spds, max_iter=max_iter, tol=1e-5)
def test_geometric_mean_errors():
n_features = 5
# Non square input matrix
mat1 = np.ones((n_features, n_features + 1))
pytest.raises(ValueError, _geometric_mean, [mat1])
# Input matrices of different shapes
mat1 = np.eye(n_features)
mat2 = np.ones((n_features + 1, n_features + 1))
pytest.raises(ValueError, _geometric_mean, [mat1, mat2])
# Non spd input matrix
pytest.raises(ValueError, _geometric_mean, [mat2])
def test_sym_matrix_to_vec():
sym = np.ones((3, 3))
sqrt2 = 1. / sqrt(2.)
vec = np.array([sqrt2, 1., sqrt2, 1., 1., sqrt2])
assert_array_almost_equal(sym_matrix_to_vec(sym), vec)
vec = np.array([1., 1., 1.])
assert_array_almost_equal(sym_matrix_to_vec(sym, discard_diagonal=True),
vec)
# Check sym_matrix_to_vec is the inverse function of vec_to_sym_matrix
n = 5
p = n * (n + 1) // 2
rand_gen = np.random.RandomState(0)
# when diagonal is included
vec = rand_gen.rand(p)
sym = vec_to_sym_matrix(vec)
assert_array_almost_equal(sym_matrix_to_vec(sym), vec)
# when diagonal given separately
diagonal = rand_gen.rand(n + 1)
sym = vec_to_sym_matrix(vec, diagonal=diagonal)
assert_array_almost_equal(sym_matrix_to_vec(sym, discard_diagonal=True),
vec)
# multiple matrices case when diagonal is included
vecs = np.asarray([vec, 2. * vec, 0.5 * vec])
syms = vec_to_sym_matrix(vecs)
assert_array_almost_equal(sym_matrix_to_vec(syms), vecs)
# multiple matrices case when diagonal is given separately
diagonals = np.asarray([diagonal, 3. * diagonal, -diagonal])
syms = vec_to_sym_matrix(vecs, diagonal=diagonals)
assert_array_almost_equal(sym_matrix_to_vec(syms, discard_diagonal=True),
vecs)
def test_vec_to_sym_matrix():
# Check error if unsuitable size
vec = np.ones(31)
with pytest.raises(ValueError, match='Vector of unsuitable shape'):
vec_to_sym_matrix(vec)
# Check error if given diagonal shape incompatible with vec
vec = np.ones(3)
diagonal = np.zeros(4)
with pytest.raises(ValueError, match='incompatible with vector'):
vec_to_sym_matrix(vec, diagonal)
# Check output value is correct
vec = np.ones(6, )
sym = np.array([[sqrt(2), 1., 1.], [1., sqrt(2), 1.],
[1., 1., sqrt(2)]])
assert_array_almost_equal(vec_to_sym_matrix(vec), sym)
# Check output value is correct with separate diagonal
vec = np.ones(3, )
diagonal = np.ones(3)
assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)
# Check vec_to_sym_matrix is the inverse function of sym_matrix_to_vec
# when diagonal is included
assert_array_almost_equal(vec_to_sym_matrix(sym_matrix_to_vec(sym)), sym)
# when diagonal is discarded
vec = sym_matrix_to_vec(sym, discard_diagonal=True)
diagonal = np.diagonal(sym) / sqrt(2)
assert_array_almost_equal(vec_to_sym_matrix(vec, diagonal=diagonal), sym)
def test_prec_to_partial():
prec = np.array([[2., -1., 1.], [-1., 2., -1.], [1., -1., 1.]])
partial = np.array([[1., .5, -sqrt(2.) / 2.], [.5, 1., sqrt(2.) / 2.],
[-sqrt(2.) / 2., sqrt(2.) / 2., 1.]])
assert_array_almost_equal(prec_to_partial(prec), partial)
def test_connectivity_measure_errors():
# Raising error for input subjects not iterable
conn_measure = ConnectivityMeasure()
pytest.raises(ValueError, conn_measure.fit, 1.)
# Raising error for input subjects not 2D numpy.ndarrays
pytest.raises(ValueError, conn_measure.fit, [np.ones((100, 40)),
np.ones((10,))])
# Raising error for input subjects with different number of features
pytest.raises(ValueError, conn_measure.fit,
[np.ones((100, 40)), np.ones((100, 41))])
# Raising an error for fit_transform with a single subject and
# kind=tangent
conn_measure = ConnectivityMeasure(kind='tangent')
pytest.raises(ValueError, conn_measure.fit_transform,
[np.ones((100, 40)), ])
def test_connectivity_measure_outputs():
n_subjects = 10
n_features = 49
# Generate signals and compute covariances
emp_covs = []
ledoit_covs = []
signals = []
ledoit_estimator = LedoitWolf()
for k in range(n_subjects):
n_samples = 200 + k
signal, _, _ = generate_signals(n_features=n_features, n_confounds=5,
length=n_samples, same_variance=False)
signals.append(signal)
signal -= signal.mean(axis=0)
emp_covs.append((signal.T).dot(signal) / n_samples)
ledoit_covs.append(ledoit_estimator.fit(signal).covariance_)
kinds = ["covariance", "correlation", "tangent", "precision",
"partial correlation"]
# Check outputs properties
for cov_estimator, covs in zip([EmpiricalCovariance(), LedoitWolf()],
[emp_covs, ledoit_covs]):
input_covs = copy.copy(covs)
for kind in kinds:
conn_measure = ConnectivityMeasure(kind=kind,
cov_estimator=cov_estimator)
connectivities = conn_measure.fit_transform(signals)
# Generic
assert isinstance(connectivities, np.ndarray)
assert len(connectivities) == len(covs)
for k, cov_new in enumerate(connectivities):
assert_array_equal(input_covs[k], covs[k])
assert(is_spd(covs[k], decimal=7))
# Positive definiteness if expected and output value checks
if kind == "tangent":
assert_array_almost_equal(cov_new, cov_new.T)
gmean_sqrt = _map_eigenvalues(np.sqrt,
conn_measure.mean_)
assert(is_spd(gmean_sqrt, decimal=7))
assert(is_spd(conn_measure.whitening_, decimal=7))
assert_array_almost_equal(conn_measure.whitening_.dot(
gmean_sqrt), np.eye(n_features))
assert_array_almost_equal(gmean_sqrt.dot(
_map_eigenvalues(np.exp, cov_new)).dot(gmean_sqrt),
covs[k])
elif kind == "precision":
assert(is_spd(cov_new, decimal=7))
assert_array_almost_equal(cov_new.dot(covs[k]),
np.eye(n_features))
elif kind == "correlation":
assert(is_spd(cov_new, decimal=7))
d = np.sqrt(np.diag(np.diag(covs[k])))
if cov_estimator == EmpiricalCovariance():
assert_array_almost_equal(d.dot(cov_new).dot(d),
covs[k])
assert_array_almost_equal(np.diag(cov_new),
np.ones((n_features)))
elif kind == "partial correlation":
prec = linalg.inv(covs[k])
d = np.sqrt(np.diag(np.diag(prec)))
assert_array_almost_equal(d.dot(cov_new).dot(d), -prec +
2 * np.diag(np.diag(prec)))
# Check the mean_
for kind in kinds:
conn_measure = ConnectivityMeasure(kind=kind)
conn_measure.fit_transform(signals)
assert (conn_measure.mean_).shape == (n_features, n_features)
if kind != 'tangent':
assert_array_almost_equal(
conn_measure.mean_,
np.mean(conn_measure.transform(signals), axis=0))
# Check that the mean isn't modified in transform
conn_measure = ConnectivityMeasure(kind='covariance')
conn_measure.fit(signals[:1])
mean = conn_measure.mean_
conn_measure.transform(signals[1:])
assert_array_equal(mean, conn_measure.mean_)
# Check vectorization option
for kind in kinds:
conn_measure = ConnectivityMeasure(kind=kind)
connectivities = conn_measure.fit_transform(signals)
conn_measure = ConnectivityMeasure(vectorize=True, kind=kind)
vectorized_connectivities = conn_measure.fit_transform(signals)
assert_array_almost_equal(vectorized_connectivities,
sym_matrix_to_vec(connectivities))
# Check not fitted error
with pytest.raises(ValueError, match='has not been fitted. '):
ConnectivityMeasure().inverse_transform(vectorized_connectivities)
# Check inverse transformation
kinds.remove('tangent')
for kind in kinds:
# without vectorization: input matrices are returned with no change
conn_measure = ConnectivityMeasure(kind=kind)
connectivities = conn_measure.fit_transform(signals)
assert_array_almost_equal(
conn_measure.inverse_transform(connectivities), connectivities)
# with vectorization: input vectors are reshaped into matrices
# if diagonal has not been discarded
conn_measure = ConnectivityMeasure(kind=kind, vectorize=True)
vectorized_connectivities = conn_measure.fit_transform(signals)
assert_array_almost_equal(
conn_measure.inverse_transform(vectorized_connectivities),
connectivities)
# with vectorization if diagonal has been discarded
for kind in ['correlation', 'partial correlation']:
connectivities = ConnectivityMeasure(kind=kind).fit_transform(signals)
conn_measure = ConnectivityMeasure(kind=kind, vectorize=True,
discard_diagonal=True)
vectorized_connectivities = conn_measure.fit_transform(signals)
assert_array_almost_equal(
conn_measure.inverse_transform(vectorized_connectivities),
connectivities)
for kind in ['covariance', 'precision']:
connectivities = ConnectivityMeasure(kind=kind).fit_transform(signals)
conn_measure = ConnectivityMeasure(kind=kind, vectorize=True,
discard_diagonal=True)
vectorized_connectivities = conn_measure.fit_transform(signals)
diagonal = np.array([np.diagonal(conn) / sqrt(2) for conn in
connectivities])
inverse_transformed = conn_measure.inverse_transform(
vectorized_connectivities, diagonal=diagonal)
assert_array_almost_equal(inverse_transformed, connectivities)
with pytest.raises(ValueError,
match='can not reconstruct connectivity matrices'):
conn_measure.inverse_transform(vectorized_connectivities)
# for 'tangent' kind, covariance matrices are reconstructed
# without vectorization
tangent_measure = ConnectivityMeasure(kind='tangent')
displacements = tangent_measure.fit_transform(signals)
covariances = ConnectivityMeasure(kind='covariance').fit_transform(
signals)
assert_array_almost_equal(
tangent_measure.inverse_transform(displacements), covariances)
# with vectorization
# when diagonal has not been discarded
tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True)
vectorized_displacements = tangent_measure.fit_transform(signals)
assert_array_almost_equal(
tangent_measure.inverse_transform(vectorized_displacements),
covariances)
# when diagonal has been discarded
tangent_measure = ConnectivityMeasure(kind='tangent', vectorize=True,
discard_diagonal=True)
vectorized_displacements = tangent_measure.fit_transform(signals)
diagonal = np.array([np.diagonal(matrix) / sqrt(2) for matrix in
displacements])
inverse_transformed = tangent_measure.inverse_transform(
vectorized_displacements, diagonal=diagonal)
assert_array_almost_equal(inverse_transformed, covariances)
with pytest.raises(ValueError,
match='can not reconstruct connectivity matrices'):
tangent_measure.inverse_transform(vectorized_displacements)
def test_confounds_connectome_measure():
n_subjects = 10
n_features = 49
# Generate signals and compute covariances and apply confounds while
# computing covariances
signals = []
for k in range(n_subjects):
n_samples = 200 + k
signal, _, confounds = generate_signals(n_features=n_features,
n_confounds=5,
length=n_samples,
same_variance=False)
signals.append(signal)
correlation_measure = ConnectivityMeasure(kind='correlation',
vectorize=True)
# Clean confounds on 10 subjects with confounds filtered to 10 subjects in
# length
cleaned_vectors = correlation_measure.fit_transform(signals,
confounds=confounds[0:10])
zero_matrix = np.zeros((confounds.shape[1], cleaned_vectors.shape[1]))
assert_array_almost_equal(
np.dot(confounds[0:10].T, cleaned_vectors), zero_matrix)
assert(isinstance(cleaned_vectors, np.ndarray))
# Confounds as pandas DataFrame
confounds_df = DataFrame(confounds[0:10])
cleaned_vectors_df = correlation_measure.fit_transform(
signals, confounds=confounds_df)
# Raising error for input confounds are not iterable
conn_measure = ConnectivityMeasure(vectorize=True)
pytest.raises(ValueError, conn_measure._check_input, signals, confounds=1.)
pytest.raises(ValueError, conn_measure._fit_transform,
X=signals, do_fit=True, do_transform=True,
confounds=1.)
pytest.raises(ValueError, conn_measure.fit_transform, signals, None, 1.)
# Raising error for input confounds are given but not vectorize=True
conn_measure = ConnectivityMeasure(vectorize=False)
pytest.raises(ValueError, conn_measure.fit_transform,
signals, None, confounds[0:10])
|
461891
|
def _run(script):
global __file__
import os, sys
sys.frozen = 'macosx_plugin'
base = os.environ['RESOURCEPATH']
__file__ = path = os.path.join(base, script)
if sys.version_info[0] == 2:
with open(path, 'rU') as fp:
source = fp.read() + "\n"
else:
with open(path, 'r', encoding='utf-8') as fp:
source = fp.read() + '\n'
exec(compile(source, script, 'exec'), globals(), globals())
_run('plugin.py')
|
461896
|
import sys
import os
path = "DataBackups"
auth = ["User"]
fitapp = ["UserFitbit", "TimeSeriesDataType", "TimeSeriesData"]
tracktivityPetsWebsite = ["Inventory",
"Level",
"Scenery",
"CollectedScenery",
"Pet",
"CollectedPet",
"BodyPart",
"Item",
"CollectedItem",
"Profile",
"Happiness",
"Experience",
"Mood",
"Phrase",
"Story",
"MicroChallenge",
"MicroChallengeMedal",
"MicroChallengeState",
"UserMicroChallengeState",
"UserMicroChallenge",
"PetSwap",
"MicroChallengeGoal",
"UserMicroChallengeGoalStatus",
"UserNotification"]
if __name__ == "__main__":
if sys.argv[1] == "-dump":
for i in auth:
print("Running command: python manage.py dumpdata auth.{0} --indent 4 --format json > {1}/{0}.json".format(i, path))
os.system("python manage.py dumpdata auth.{0} --indent 4 --format json > {1}/{0}.json".format(i, path))
for i in fitapp:
print("Running command: python manage.py dumpdata fitapp.{0} --indent 4 --format json > {1}/{0}.json".format(i, path))
os.system("python manage.py dumpdata fitapp.{0} --indent 4 --format json > {1}/{0}.json".format(i, path))
for i in tracktivityPetsWebsite:
print("Running command: python manage.py dumpdata tracktivityPetsWebsite.{0} --indent 4 --format json > {1}/{0}.json".format(i, path))
os.system("python manage.py dumpdata tracktivityPetsWebsite.{0} --indent 4 --format json > {1}/{0}.json".format(i, path))
elif sys.argv[1] == "-load":
for i in auth:
print("Running command: python manage.py loaddata {1}/{0}.json".format(i, path))
os.system("python manage.py loaddata {1}/{0}.json".format(i, path))
for i in fitapp:
print("Running command: python manage.py loaddata {1}/{0}.json".format(i, path))
os.system("python manage.py loaddata {1}/{0}.json".format(i, path))
for i in tracktivityPetsWebsite:
print("Running command: python manage.py loaddata {1}/{0}.json".format(i, path))
os.system("python manage.py loaddata {1}/{0}.json".format(i, path))
|
461904
|
from __future__ import unicode_literals
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
'{{last_name}} {{company_suffix}}',
'{{last_name}} {{last_name}} {{company_suffix}}',
'{{last_name}} {{last_name}} {{company_suffix}}',
'{{last_name}}',
)
company_suffixes = (
'As Oy', 'Tmi', 'Oy', 'Oyj', 'Ky', 'Osk', 'ry',
)
def company_business_id(self):
"""
Returns Finnish company Business Identity Code (y-tunnus).
        Format is 7 digits, a dash and a check digit - e.g. 1234567-8. The
        check digit is computed with the MOD 11-2 method. The first digit is
        zero for some old organizations; this function provides current codes
        starting with a non-zero digit.
"""
def calculate_checksum(number):
"""Calculate the checksum using mod 11,2 method"""
factors = [7, 9, 10, 5, 8, 4, 2]
sum_ = 0
for x, y in zip(number, factors):
sum_ = sum_ + int(x) * y
if sum_ % 11 == 0:
return '0'
else:
return str(11 - sum_ % 11)
first_digit = str(self.random_digit_not_null())
body = first_digit + self.bothify('######')
cs = calculate_checksum(body)
return body + '-' + str(cs)
def company_vat(self):
"""
Returns Finnish VAT identification number (Arvonlisaveronumero).
This can be calculated from company business identity code by
adding prefix "FI" and removing dash before checksum.
"""
def convert_to_vat(business_id):
"""
Convert business id to VATIN
"""
return 'FI' + business_id.replace('-', '')
return convert_to_vat(self.company_business_id())
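# --- Illustrative sketch (not part of the provider) ---
# A minimal, standalone walk-through of the MOD 11-2 checksum used by
# company_business_id() above; the sample body '1234567' is an assumed value,
# not a real business id.
def _demo_business_id_checksum(body='1234567'):
    factors = [7, 9, 10, 5, 8, 4, 2]
    total = sum(int(digit) * factor for digit, factor in zip(body, factors))
    checksum = '0' if total % 11 == 0 else str(11 - total % 11)
    return body + '-' + checksum
# _demo_business_id_checksum('1234567') -> '1234567-1'
# (7 + 18 + 30 + 20 + 40 + 24 + 14 = 153, 153 % 11 = 10, 11 - 10 = 1)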
|
461927
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
process = cms.Process("HFTEST",Run2_2018)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load('Configuration.StandardSequences.Generator_cff')
#--- Magnetic Field
process.load("Configuration.StandardSequences.MagneticField_cff")
#--- Full geometry or only HCAL+ECAL Geometry
# include "Geometry/CMSCommonData/data/cmsIdealGeometryXML.cfi"
# include "Geometry/TrackerNumberingBuilder/data/trackerNumberingGeometry.cfi"
process.load("Geometry.CMSCommonData.ecalhcalGeometryXML_cfi")
process.load("Geometry.EcalCommonData.ecalSimulationParameters_cff")
process.load("Geometry.HcalCommonData.hcalDDConstants_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['run2_mc']
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
process.Timing = cms.Service("Timing")
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
process.rndmStore = cms.EDProducer("RandomEngineStateProducer")
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(11, 211),
MinEta = cms.double(3.02),
MaxEta = cms.double(5.10),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(1000.0),
MaxE = cms.double(5000.0)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.VtxSmeared = cms.EDProducer("GaussEvtVtxGenerator",
MeanX = cms.double(0.0),
MeanY = cms.double(0.0),
MeanZ = cms.double(0.0),
SigmaY = cms.double(0.0001),
SigmaX = cms.double(0.0001),
SigmaZ = cms.double(0.0001),
TimeOffset = cms.double(0.0),
src = cms.InputTag("generator","unsmeared")
)
process.o1 = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('HF_test_ecalplushcalonly_nofield.root')
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits)
process.outpath = cms.EndPath(process.o1)
process.MessageLogger.cerr.default.limit = 100
process.g4SimHits.UseMagneticField = False
process.g4SimHits.OnlySDs = ['EcalSensitiveDetector', 'CaloTrkProcessing', 'HcalSensitiveDetector']
|
461928
|
from Core.HookWindow import PixelMatchesColor
class ScanStages:
def __init__(self, name):
self.stage = 0
self.name = name
def ScanStages(self, Localization, color, colorFull):
if PixelMatchesColor(Localization[0] + 100, Localization[1],
(colorFull[0], colorFull[1], colorFull[2])):
self.stage = 100
# print(f"Get {self.name}: {self.stage}%")
return self.stage
else:
for i in range(95, 5, -5):
if PixelMatchesColor(Localization[0] + i, Localization[1], (color[0], color[1], color[2])):
self.stage = i
# print(f"Get {self.name}: {self.stage}%")
return self.stage
|
461930
|
from transformers.optimization import get_cosine_schedule_with_warmup , get_linear_schedule_with_warmup
from tqdm import tqdm
import torch as tc
import pdb
import os , sys
import math
import fitlog
import pickle
from models import get_model
from test import test
from utils.train_util import get_data_from_batch
def before_train(C , logger , train_data , n_rel_typs):
batch_numb = (len(train_data) // C.batch_size) + int((len(train_data) % C.batch_size) != 0)
device = tc.device(C.device)
model = get_model()(
n_rel_typs = n_rel_typs , dropout = C.dropout ,
device = C.gpus[0] ,
gnn = C.gnn , matrix_trans = C.matrix_trans , matrix_nlayer = C.matrix_nlayer ,
).to(C.gpus[0])
model = tc.nn.DataParallel(model , C.gpus)
optimizer = tc.optim.Adam(params = model.parameters() , lr = C.lr)
scheduler_makers = {
"linear": get_linear_schedule_with_warmup ,
"cosine": get_cosine_schedule_with_warmup ,
}
scheduler = scheduler_makers[C.scheduler](
optimizer = optimizer ,
num_warmup_steps = int(C.warmup_prop * batch_numb * C.epoch_numb),
num_training_steps = batch_numb * C.epoch_numb ,
)
return (batch_numb , device) , (model , optimizer , scheduler)
def update_batch(C , logger ,
model , optimizer , scheduler , loss_func ,
sents , ents , anss , data_ent ,
):
pred = model(sents , ents , devices = C.gpus)
loss = loss_func(pred , anss , ents)
#----- backward -----
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
return loss , pred
def train(C , logger , train_data , valid_data , loss_func , generator , n_rel_typs , run_name = "0" , test_data = None):
(batch_numb , device) , (model , optimizer , scheduler) = before_train(
C , logger , train_data , n_rel_typs
)
#----- iterate each epoch -----
best_epoch = -1
best_metric = -1
for epoch_id in range(C.epoch_numb):
pbar = tqdm(range(batch_numb) , ncols = 70)
avg_loss = 0
for batch_id in pbar:
#----- get data -----
data = train_data[batch_id * C.batch_size : (batch_id+1) * C.batch_size]
sents , ents , anss , data_ent = get_data_from_batch(data , device = device)
loss , pred = update_batch(
C , logger , model , optimizer , scheduler , loss_func , sents , ents , anss , data_ent
)
avg_loss += float(loss)
fitlog.add_loss(value = float(loss) , step = epoch_id * batch_numb + batch_id ,
name = "({0})train loss".format(run_name))
pbar.set_description_str("(Train)Epoch %d" % (epoch_id))
pbar.set_postfix_str("loss = %.4f (avg = %.4f)" % ( float(loss) , avg_loss / (batch_id+1)))
logger.log ("Epoch %d ended. avg_loss = %.4f" % (epoch_id , avg_loss / batch_numb))
micro_f1 , macro_f1 , test_loss = test(
C , logger ,
valid_data , model ,
loss_func , generator ,
"valid" , epoch_id , run_name ,
)
if C.valid_metric in ["macro*micro" , "micro*macro"]:
metric = macro_f1 * micro_f1
elif C.valid_metric == "macro":
metric = macro_f1
elif C.valid_metric == "micro":
metric = micro_f1
else:
assert False
if best_metric < metric:
best_epoch = epoch_id
best_metric = metric
with open(C.tmp_file_name + ".model" + "." + str(run_name) , "wb") as fil:
pickle.dump(model , fil)
# fitlog.add_best_metric(best_macro_f1 , name = "({0})macro f1".format(ensemble_id))
model = model.train()
if not C.no_valid: #reload best model
with open(C.tmp_file_name + ".model" + "." + str(run_name) , "rb") as fil:
model = pickle.load(fil) #load best valid model
logger.log("reloaded best model at epoch %d" % best_epoch)
if test_data is not None:
final_micro_f1 , final_macro_f1 , final_test_loss = test(
C , logger ,
test_data , model ,
loss_func , generator ,
"test" , epoch_id , run_name ,
)
return model , best_metric
|
461941
|
from .common import *
ENVIRONMENT = 'development'
DEBUG = True
TEMPLATE_DEBUG = True
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
from fnmatch import fnmatch
class glob_list(list):
def __contains__(self, key):
for elt in self:
if fnmatch(key, elt): return True
return False
INTERNAL_IPS = glob_list(['127.0.0.1', '10.0.*.*'])
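# Example of the glob matching above:
#   '10.0.3.7' in INTERNAL_IPS -> True, '192.168.1.5' in INTERNAL_IPS -> False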
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEBUG_TOOLBAR_PATCH_SETTINGS = False
|
461965
|
import json
from datetime import datetime
def extract(filename):
"""
Extract data from json.
    :param filename: filepath to json
    :type filename: str
:return: (time, name, [timestamps], [latitude], [longitude])
:rtype: tuple
"""
with open(filename, 'r') as f:
data = json.load(f)
if(data['type']=='run'):
timestamps, lat, lon = [], [], []
try:
starttime = datetime.utcfromtimestamp(data['start_epoch_ms']/1000)
time = starttime.replace(microsecond=0).isoformat()+'Z'
try:
name = data['tags']['com.nike.name']
except:
name = starttime.strftime('%A')
for metric in data['metrics']:
if(metric['type']=='latitude'):
lat_values = metric['values']
elif(metric['type']=='longitude'):
lon_values = metric['values']
for lat1, lon1 in zip(lat_values, lon_values):
timestamps.append(datetime.utcfromtimestamp(lat1['start_epoch_ms']/1000).replace(microsecond=0).isoformat()+'Z')
lat.append(lat1['value'])
lon.append(lon1['value'])
return time, name, timestamps, lat, lon;
except:
return None
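# --- Illustrative usage sketch (assumed input layout, inferred from the fields
# read above; a real Nike+ export contains many more metrics) ---
if __name__ == '__main__':
    import tempfile
    sample = {
        'type': 'run',
        'start_epoch_ms': 1500000000000,
        'tags': {'com.nike.name': 'Morning Run'},
        'metrics': [
            {'type': 'latitude',
             'values': [{'start_epoch_ms': 1500000000000, 'value': 60.17}]},
            {'type': 'longitude',
             'values': [{'start_epoch_ms': 1500000000000, 'value': 24.94}]},
        ],
    }
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
        json.dump(sample, tmp)
    # -> ('2017-07-14T02:40:00Z', 'Morning Run', ['2017-07-14T02:40:00Z'], [60.17], [24.94])
    print(extract(tmp.name))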
|
461969
|
from segmentation_models_pytorch.base import (ClassificationHead,
SegmentationHead,
SegmentationModel)
from segmentation_models_pytorch.base import initialization as init
from segmentation_models_pytorch.encoders import get_encoder
from segmentation_models_pytorch.fpn.decoder import *
from trainer.start import *
from .common import *
@dataclass
class FPNConfig(BaseConfig):
n_out: int
backbone: str = 'resnet50'
n_in: int = 1
n_pyramid_ch: int = 256
n_dec_ch: int = 128
weights: str = 'imagenet'
# 'original', 'custom'
segment_block: str = 'original'
# 'groupnorm', 'batchnorm'
use_norm: str = 'groupnorm'
n_group: int = 32
decoder_activation: str = 'relu'
@property
def name(self):
name = f'fpn-{self.backbone}-py{self.n_pyramid_ch}dec{self.n_dec_ch}'
if self.segment_block != 'original':
if self.use_norm == 'groupnorm':
name += f'-gn{self.n_group}'
elif self.use_norm == 'batchnorm':
name += f'-bn'
else:
raise NotImplementedError()
if self.decoder_activation != 'relu':
name += f'-{self.decoder_activation}'
if self.weights is not None:
name += f'-{self.weights}'
return name
def make_model(self):
return FPN(self)
class FPN(nn.Module):
def __init__(self, conf: FPNConfig):
super().__init__()
self.conf = conf
self.net = FPNCustom(
conf.backbone,
in_channels=conf.n_in,
encoder_weights=conf.weights,
classes=conf.n_out,
upsampling=1,
decoder_dropout=0,
decoder_pyramid_channels=conf.n_pyramid_ch,
decoder_segmentation_channels=conf.n_dec_ch,
segment_block=conf.segment_block,
use_norm=conf.use_norm,
n_group=conf.n_group,
decoder_activation=conf.decoder_activation,
)
self.pool = nn.AdaptiveMaxPool2d(1)
def forward(self, img, classification=None, **kwargs):
        # Casting to float32 here is a good idea: if the loss function involves
        # a reduction operation, reduced precision could be harmful, and this
        # cast prevents that problem.
seg = self.net(img).float()
pred = self.pool(seg)
pred = torch.flatten(pred, start_dim=1)
loss = None
loss_pred = None
loss_bbox = None
if classification is not None:
loss_pred = F.binary_cross_entropy_with_logits(
pred, classification.float())
loss = loss_pred
return ModelReturn(
pred=pred,
pred_seg=seg,
loss=loss,
loss_pred=loss_pred,
loss_bbox=loss_bbox,
)
class FPNCustom(SegmentationModel):
def __init__(
self,
encoder_name: str = "resnet34",
encoder_depth: int = 5,
encoder_weights: Optional[str] = "imagenet",
decoder_pyramid_channels: int = 256,
decoder_segmentation_channels: int = 128,
decoder_dropout: float = 0.2,
in_channels: int = 3,
classes: int = 1,
activation: Optional[str] = None,
upsampling: int = 4,
aux_params: Optional[dict] = None,
segment_block='original',
use_norm='groupnorm',
decoder_activation='relu',
decoder_negative_slope=0.01,
n_group=32,
**kwargs,
):
super().__init__()
self.encoder = get_encoder(
encoder_name,
in_channels=in_channels,
depth=encoder_depth,
weights=encoder_weights,
)
self.decoder = FPNDecoder(
encoder_channels=self.encoder.out_channels,
encoder_depth=encoder_depth,
pyramid_channels=decoder_pyramid_channels,
segmentation_channels=decoder_segmentation_channels,
dropout=decoder_dropout,
segment_block=segment_block,
use_norm=use_norm,
activation=decoder_activation,
negative_slope=decoder_negative_slope,
n_group=n_group,
)
self.segmentation_head = SegmentationHead(
in_channels=self.decoder.out_channels,
out_channels=classes,
activation=activation,
kernel_size=1,
upsampling=upsampling,
)
if aux_params is not None:
self.classification_head = ClassificationHead(
in_channels=self.encoder.out_channels[-1], **aux_params)
else:
self.classification_head = None
self.name = "fpn-{}".format(encoder_name)
self.initialize()
class FPNDecoder(nn.Module):
def __init__(
self,
encoder_channels,
encoder_depth=5,
pyramid_channels=256,
segmentation_channels=128,
segment_block='original',
dropout=0.2,
merge_policy="add",
use_norm='groupnorm',
activation='relu',
negative_slope=0.01,
n_group=32,
):
super().__init__()
self.out_channels = segmentation_channels
if encoder_depth < 3:
raise ValueError(
"Encoder depth for FPN decoder cannot be less than 3, got {}.".
format(encoder_depth))
encoder_channels = encoder_channels[::-1]
encoder_channels = encoder_channels[:encoder_depth + 1]
self.p5 = nn.Conv2d(encoder_channels[0],
pyramid_channels,
kernel_size=1)
self.p4 = FPNBlock(pyramid_channels, encoder_channels[1])
self.p3 = FPNBlock(pyramid_channels, encoder_channels[2])
self.p2 = FPNBlock(pyramid_channels, encoder_channels[3])
seg_opts = {
'original': SegmentationBlock,
'simple': SegmentationBlockSimple,
            'custom': SegmentationBlockCustom,
}
if segment_block == 'custom':
seg_args = dict(use_norm=use_norm,
activation=activation,
negative_slope=negative_slope,
n_group=n_group)
else:
seg_args = {}
self.seg_blocks = nn.ModuleList([
seg_opts[segment_block](
pyramid_channels,
segmentation_channels,
n_upsamples=n_upsamples,
**seg_args,
) for n_upsamples in [3, 2, 1, 0]
])
self.merge = MergeBlock(merge_policy)
self.dropout = nn.Dropout2d(p=dropout, inplace=True)
def forward(self, *features):
c2, c3, c4, c5 = features[-4:]
p5 = self.p5(c5)
p4 = self.p4(p5, c4)
p3 = self.p3(p4, c3)
p2 = self.p2(p3, c2)
feature_pyramid = [
seg_block(p)
for seg_block, p in zip(self.seg_blocks, [p5, p4, p3, p2])
]
x = self.merge(feature_pyramid)
x = self.dropout(x)
return x
class SegmentationBlockCustom(nn.Module):
"""able to change norm"""
def __init__(self,
in_channels,
out_channels,
n_upsamples=0,
use_norm='groupnorm',
activation='relu',
negative_slope=0.01,
n_group=32):
super().__init__()
blocks = [
Conv3x3NormReLU(in_channels,
out_channels,
upsample=bool(n_upsamples),
use_norm=use_norm,
activation=activation,
negative_slope=negative_slope,
n_group=n_group)
]
if n_upsamples > 1:
for _ in range(1, n_upsamples):
blocks.append(
Conv3x3NormReLU(out_channels,
out_channels,
upsample=True,
use_norm=use_norm,
activation=activation,
negative_slope=negative_slope,
n_group=n_group))
self.block = nn.Sequential(*blocks)
def forward(self, x):
return self.block(x)
class SegmentationBlockSimple(nn.Module):
"""simple segmentation block has only one conv layer + upsample (the rest)"""
def __init__(self, in_channels, out_channels, n_upsamples=0):
super().__init__()
self.n_upsamples = n_upsamples
self.block = Conv3x3GNReLU(in_channels, out_channels, upsample=False)
def forward(self, x):
x = self.block(x)
for i in range(self.n_upsamples):
x = F.interpolate(x,
scale_factor=2,
mode="bilinear",
align_corners=True)
return x
class Conv3x3NormReLU(nn.Module):
"""custom the norm"""
def __init__(self,
in_channels,
out_channels,
upsample=False,
use_norm='groupnorm',
activation='relu',
negative_slope=0.01,
n_group=32):
super().__init__()
self.upsample = upsample
norm_opts = {
'groupnorm': lambda: nn.GroupNorm(n_group, out_channels),
'batchnorm': lambda: nn.BatchNorm2d(out_channels),
}
act_opts = {
'relu':
lambda: nn.ReLU(inplace=True),
'lrelu':
lambda: nn.LeakyReLU(negative_slope=negative_slope, inplace=True),
}
self.block = nn.Sequential(
nn.Conv2d(in_channels,
out_channels, (3, 3),
stride=1,
padding=1,
bias=False),
norm_opts[use_norm](),
act_opts[activation](),
)
def forward(self, x):
x = self.block(x)
if self.upsample:
x = F.interpolate(x,
scale_factor=2,
mode="bilinear",
align_corners=True)
return x
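# --- Illustrative usage sketch (assumed; relies on the local BaseConfig and
# ModelReturn helpers imported above, so it is not standalone) ---
# conf = FPNConfig(n_out=1, backbone='resnet50',
#                  segment_block='custom', use_norm='batchnorm')
# model = conf.make_model()                  # FPN wrapping FPNCustom
# out = model(torch.zeros(2, 1, 256, 256))   # ModelReturn with pred/pred_seg
# out.pred.shape                             # -> torch.Size([2, 1]) after global max pooling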
|
461976
|
import copy
import itertools
import uuid
from collections import defaultdict
from operator import itemgetter
import boto3
from botocore.exceptions import ClientError
from botocore.stub import Stubber
def stubbed(function):
"""
A decorator that activates/deactivates the Stubber and makes sure all
expected calls are made.
The general pattern for stubbing a new method is:
@stubbed
def method(self, **kwargs):
self.stubber.add_response(
method="method", # Name of the method being stubbed
            service_response={},        # Stubber validates the response shape
            expected_params={**kwargs}, # Stubber validates the params
)
self.client.method(**kwargs) # "super" (except we're not actually
# subclassing anything)
"""
def wrapper(*args, **kwargs):
self = args[0]
# If we're the first stub, activate:
if not self.stub_count:
self.stubber.activate()
self.stub_count += 1
try:
response = function(*args, **kwargs)
# If we're the outermost stub, clean up
self.stub_count -= 1
if not self.stub_count:
self.stubber.deactivate()
self.stubber.assert_no_pending_responses()
return copy.deepcopy(response)
except Exception as ex:
# Exceptions should reset the stubber
self.stub_count = 0
self.stubber.deactivate()
self.stubber = Stubber(self.client)
raise ex
return wrapper
class StubbedEcsError(Exception):
pass
class StubbedEcs:
"""
A class that stubs ECS responses using botocore's Stubber:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/stubber.html
Stubs are minimally sufficient for testing existing Dagster ECS behavior;
consequently, not all endpoints are stubbed and not all stubbed endpoints
are stubbed exhaustively.
Stubber validates that we aren't passing invalid parameters or returning
an invalid response shape. Additionally, some resources (tasks, tags,
task_definitions) are stored in the instance which allows methods to have
some minimal interaction - for example, you can't run a task if you haven't
first created a task definition.
We can't extend botocore.client.ECS directly. Eventually, we might want to
    register these methods as events instead of maintaining our own StubbedEcs
class:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/events.html
"""
def __init__(self, boto3_client):
self.client = boto3_client
self.stubber = Stubber(self.client)
self.meta = self.client.meta
self.tasks = defaultdict(list)
self.task_definitions = defaultdict(list)
self.tags = defaultdict(list)
self.stub_count = 0
@stubbed
def describe_task_definition(self, **kwargs):
family = kwargs.get("taskDefinition") or ""
revision = None
if ":" in family:
# We received either an ARN or family:revision
family, revision = family.split(":")[-2:]
if "/" in family:
# We received an ARN
family = family.split("/")[-1]
task_definitions = self.task_definitions.get(family, [])
if revision:
# Match the exact revision
task_definition = next(
(
task_definition
for task_definition in task_definitions
if task_definition["revision"] == int(revision)
),
None,
)
else:
# Get the latest revision
task_definition = next(
iter(sorted(task_definitions, key=itemgetter("revision"), reverse=True)), None
)
if task_definition:
self.stubber.add_response(
method="describe_task_definition",
service_response={"taskDefinition": task_definition},
expected_params={**kwargs},
)
else:
self.stubber.add_client_error(
method="describe_task_definition", expected_params={**kwargs}
)
return self.client.describe_task_definition(**kwargs)
@stubbed
def describe_tasks(self, **kwargs):
cluster = self._cluster(kwargs.get("cluster"))
arns = kwargs.get("tasks")
for i, arn in enumerate(arns):
if ":" not in arn:
# We received just a task ID, not a full ARN
arns[i] = self._arn("task", f"{cluster}/{arn}")
tasks = [task for task in self.tasks[cluster] if task["taskArn"] in arns]
self.stubber.add_response(
method="describe_tasks",
service_response={"tasks": tasks},
expected_params={**kwargs},
)
return self.client.describe_tasks(**kwargs)
@stubbed
def list_tags_for_resource(self, **kwargs):
"""
Only task tagging is stubbed; other resources won't work.
"""
arn = kwargs.get("resourceArn")
if self._task_exists(arn):
self.stubber.add_response(
method="list_tags_for_resource",
service_response={"tags": self.tags.get(arn, [])},
expected_params={**kwargs},
)
else:
self.stubber.add_client_error(
method="list_tags_for_resource", expected_params={**kwargs}
)
return self.client.list_tags_for_resource(**kwargs)
@stubbed
def list_task_definitions(self, **kwargs):
arns = [
task_definition["taskDefinitionArn"]
for task_definition in itertools.chain.from_iterable(self.task_definitions.values())
]
self.stubber.add_response(
method="list_task_definitions",
service_response={"taskDefinitionArns": arns},
expected_params={**kwargs},
)
return self.client.list_task_definitions(**kwargs)
@stubbed
def list_tasks(self, **kwargs):
"""
Only filtering by family and cluster is stubbed.
TODO: Pagination
"""
cluster = self._cluster(kwargs.get("cluster"))
family = kwargs.get("family")
tasks = self.tasks[cluster]
if family:
tasks = [
task
for task in tasks
                # family isn't part of the task response but can be inferred from the arn
if task["taskDefinitionArn"].split("/")[-1].split(":")[0] == family
]
arns = [task["taskArn"] for task in tasks]
self.stubber.add_response(
method="list_tasks",
service_response={"taskArns": arns},
expected_params={**kwargs},
)
return self.client.list_tasks(**kwargs)
@stubbed
def register_task_definition(self, **kwargs):
family = kwargs.get("family")
# Revisions are 1 indexed
revision = len(self.task_definitions[family]) + 1
arn = self._task_definition_arn(family, revision)
task_definition = {
"family": family,
"revision": revision,
"taskDefinitionArn": arn,
**kwargs,
}
self.task_definitions[family].append(task_definition)
self.stubber.add_response(
method="register_task_definition",
service_response={"taskDefinition": task_definition},
expected_params={**kwargs},
)
return self.client.register_task_definition(**kwargs)
@stubbed
def run_task(self, **kwargs):
"""
run_task is an endpoint with complex behaviors and consequently is not
exhaustively stubbed.
"""
try:
task_definition = self.describe_task_definition(
taskDefinition=kwargs.get("taskDefinition")
)["taskDefinition"]
is_awsvpc = task_definition.get("networkMode") == "awsvpc"
containers = []
for container in task_definition.get("containerDefinitions", []):
containers.append(
{key: value for key, value in container.items() if key in ["name", "image"]}
)
network_configuration = kwargs.get("networkConfiguration", {})
vpc_configuration = network_configuration.get("awsvpcConfiguration")
container_overrides = kwargs.get("overrides", {}).get("containerOverrides", [])
if is_awsvpc:
if not network_configuration:
raise StubbedEcsError
if not vpc_configuration:
raise StubbedEcsError
cluster = self._cluster(kwargs.get("cluster"))
count = kwargs.get("count", 1)
tasks = []
for _ in range(count):
arn = self._task_arn(cluster)
task = {
"attachments": [],
"clusterArn": self._cluster_arn(cluster),
"containers": containers,
"lastStatus": "RUNNING",
"overrides": {"containerOverrides": container_overrides},
"taskArn": arn,
"taskDefinitionArn": task_definition["taskDefinitionArn"],
}
if vpc_configuration:
for subnet in vpc_configuration["subnets"]:
ec2 = boto3.resource("ec2", region_name=self.client.meta.region_name)
subnet = ec2.Subnet(subnet)
                        # load() raises if the provided subnet doesn't exist
subnet.load()
network_interfaces = list(subnet.network_interfaces.all())
# There's no Network Interface associated with the subnet
if not network_interfaces:
raise StubbedEcsError
task["attachments"].append(
{
"type": "ElasticNetworkInterface",
"details": [
{"name": "subnetId", "value": subnet.id},
{
"name": "networkInterfaceId",
"value": network_interfaces[0].id,
},
],
}
)
tasks.append(task)
self.stubber.add_response(
method="run_task",
service_response={"tasks": tasks},
expected_params={**kwargs},
)
self.tasks[cluster] += tasks
except (StubbedEcsError, ClientError):
self.stubber.add_client_error(method="run_task", expected_params={**kwargs})
return self.client.run_task(**kwargs)
@stubbed
def stop_task(self, **kwargs):
cluster = self._cluster(kwargs.get("cluster"))
task = kwargs.get("task")
tasks = self.describe_tasks(tasks=[task], cluster=cluster)["tasks"]
if tasks:
stopped_task = tasks[0]
self.tasks[cluster].remove(tasks[0])
stopped_task["lastStatus"] = "STOPPED"
self.tasks[cluster].append(stopped_task)
self.stubber.add_response(
method="stop_task",
service_response={"task": stopped_task},
expected_params={**kwargs},
)
else:
self.stubber.add_client_error(method="stop_task", expected_params={**kwargs})
return self.client.stop_task(**kwargs)
@stubbed
def tag_resource(self, **kwargs):
"""
Only task tagging is stubbed; other resources won't work
"""
tags = kwargs.get("tags")
arn = kwargs.get("resourceArn")
if self._task_exists(arn):
self.stubber.add_response(
method="tag_resource",
service_response={},
expected_params={**kwargs},
)
self.tags[arn] = tags
else:
self.stubber.add_client_error(method="tag_resource", expected_params={**kwargs})
return self.client.tag_resource(**kwargs)
def _task_exists(self, arn):
for task in itertools.chain.from_iterable(self.tasks.values()):
if task["taskArn"] == arn:
return True
return False
def _arn(self, resource_type, resource_id):
return f"arn:aws:ecs:us-east-1:1234567890:{resource_type}/{resource_id}"
def _cluster(self, cluster):
return (cluster or "default").split("/")[-1]
def _cluster_arn(self, cluster):
return self._arn("cluster", self._cluster(cluster))
def _task_arn(self, cluster):
return self._arn("task", f"{self._cluster(cluster)}/{uuid.uuid4()}")
def _task_definition_arn(self, family, revision):
return self._arn("task-definition", f"{family}:{revision}")
|
462028
|
from abc import abstractmethod
class BaseModule:
"""
A module is basically a class that is called to processes a line of text
and returns the same line (A), a modified version of that line (B) or None (C)
Where:
A = there is nothing to process
B = the line got processed by the module
C = the line should not be stored (e.g do not write this line to output)
"""
@abstractmethod
def process(self, line):
pass
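# --- Illustrative sketch (assumed subclass, not part of the original module) ---
# Demonstrates the three return conventions described in the docstring above:
# return the line unchanged (A), return a modified line (B), or return None to
# drop the line (C).
class LowercaseCommentStripper(BaseModule):
    def process(self, line):
        if line.startswith('#'):
            return None          # (C) the line should not be stored
        if line != line.lower():
            return line.lower()  # (B) a modified version of the line
        return line              # (A) nothing to process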
|
462030
|
import unittest
import torch
import os
import sys
ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..")
sys.path.insert(0, ROOT)
from torch_points_kernels.cluster import grow_proximity, region_grow
class TestGrow(unittest.TestCase):
def setUp(self):
self.pos = torch.tensor(
[
[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[10, 0, 0],
[0, 0, 0],
[1, 0, 0],
[2, 0, 0],
[10, 0, 0],
]
)
self.batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
self.labels = torch.tensor([0, 0, 1, 1, 0, 1, 1, 10])
def test_simple(self):
clusters = grow_proximity(self.pos, self.batch, radius=2, min_cluster_size=1)
self.assertEqual(clusters, [[0, 1, 2], [3], [4, 5, 6], [7]])
clusters = grow_proximity(self.pos, self.batch, radius=2, min_cluster_size=3)
self.assertEqual(clusters, [[0, 1, 2], [4, 5, 6]])
def test_region_grow(self):
cluster_idx = region_grow(self.pos, self.labels, self.batch, radius=2, min_cluster_size=1)
self.assertEqual(len(cluster_idx), 6)
torch.testing.assert_allclose(cluster_idx[0], torch.tensor([0, 1]))
torch.testing.assert_allclose(cluster_idx[1], torch.tensor([4]))
torch.testing.assert_allclose(cluster_idx[2], torch.tensor([2]))
torch.testing.assert_allclose(cluster_idx[3], torch.tensor([3]))
torch.testing.assert_allclose(cluster_idx[4], torch.tensor([5, 6]))
torch.testing.assert_allclose(cluster_idx[5], torch.tensor([7]))
if __name__ == "__main__":
unittest.main()
|
462061
|
import os
import time
import math
import basis
import numpy as np
import modeling.geometric_model as gm
import modeling.collision_model as cm
import robot_sim.robots.xarm7_shuidi_mobile.xarm7_shuidi_mobile as xav
if __name__ == '__main__':
import copy
import motion.probabilistic.rrt_connect as rrtc
import visualization.panda.rpc.rviz_client as rv_client
# # local code
global_frame = gm.gen_frame()
# define robot_s and robot_s anime info
robot_s = xav.XArm7YunjiMobile()
robot_meshmodel_parameters = [None, # tcp_jntid
None, # tcp_loc_pos
None, # tcp_loc_rotmat
False, # toggle_tcpcs
False, # toggle_jntscs
[0, .7, 0, .3], # rgba
'auto'] # name
# define object and object anime info
objfile = os.path.join(basis.__path__[0], 'objects', 'bunnysim.stl')
obj = cm.CollisionModel(objfile)
obj_parameters = [[.3, .2, .1, 1]] # rgba
obj_path = [[np.array([.85, 0, .17]), np.eye(3)]] # [pos, rotmat]
obj.set_pos(np.array([.85, 0, .17]))
obj.set_rgba([.5, .7, .3, .1])
# remote code
rvc = rv_client.RVizClient(host="localhost:182001")
rvc.reset()
rvc.load_common_definition(__file__, line_ids = range(1,8))
rvc.change_campos_and_lookatpos(np.array([5,0,2]), np.array([0,0,.5]))
# copy to remote
rmt_global_frame = rvc.showmodel_to_remote(global_frame)
rmt_bunny = rvc.showmodel_to_remote(obj)
rmt_robot_s = rvc.copy_to_remote(robot_s)
# rvc.show_stationary_obj(rmt_obj)
robot_component_name = 'arm'
robot_s.fk(component_name=robot_component_name, jnt_values=np.array([0, math.pi * 2 / 3, 0, math.pi, 0, -math.pi / 6, 0]))
rrtc_planner = rrtc.RRTConnect(robot_s)
path = rrtc_planner.plan(start_conf=np.array([0, math.pi * 2 / 3, 0, math.pi, 0, -math.pi / 6, 0]),
goal_conf=np.array([math.pi / 3, math.pi * 1 / 3, 0, math.pi / 2, 0, math.pi / 6, 0]),
obstacle_list=[obj],
ext_dist=.1,
max_time=300,
component_name=robot_component_name)
rmt_anime_robotinfo = rvc.add_anime_robot(rmt_robot_s=rmt_robot_s,
loc_robot_component_name=robot_component_name,
loc_robot_meshmodel_parameters=robot_meshmodel_parameters,
loc_robot_motion_path=path)
# rmt_robot_meshmodel = rvc.add_stationary_robot(rmt_robot_s=rmt_robot_s,
# loc_robot_s=robot_s)
time.sleep(1)
# # draw sequence, problem: cannot work together with anime? (lost poses) -> cannot use the same remote instance
# rmt_robot_mesh_list = []
# newpath = copy.deepcopy(path)
# rmt_robot_s2 = rvc.copy_to_remote(robot_s)
# while True:
# for pose in newpath:
# robot_s.fk(hnd_name='arm', jnt_values=pose)
# # rmt_robot_mesh_list.append(rvc.showmodel_to_remote(robot_s.gen_meshmodel()))
# rmt_robot_mesh_list.append(rvc.add_stationary_robot(rmt_robot_s2, robot_s))
# time.sleep(.1)
# rvc.reset()
# rvc.load_common_definition(__file__, line_ids=range(1, 8))
# rvc.change_campos_and_lookatpos(np.array([5, 0, 2]), np.array([0, 0, .5]))
# time.sleep(.1)
# rvc.delete_anime_robot(rmt_anime_robotinfo)
# rvc.delete_stationary_robot(rmt_robot_meshmodel)
# robot_s.fk(path[-1], hnd_name=robot_component_name)
# rmt_robot_meshmodel = rvc.add_stationary_robot(rmt_robot_s='robot_s', loc_robot_s=robot_s)
# obj.set_pos(obj.get_pos()+np.array([0,.1,0]))
# obj.set_rgba([1,0,0,1])
# rvc.update_remote(rmt_bunny, obj)
|
462088
|
from .data import DataSet, DataCollate
from .embedding import Model as Embedding
from .model import Model
|
462132
|
from typing import Tuple, List, Mapping, Optional, Union
import base64
from io import BytesIO, BufferedReader
from .command_builder import BitcoinCommandBuilder, BitcoinInsType
from ...common import Chain
from .client_command import ClientCommandInterpreter
from .client_base import Client, TransportClient
from .client_legacy import LegacyClient
from .exception import DeviceException, NotSupportedError
from .merkle import get_merkleized_map_commitment
from .wallet import Wallet, WalletType, PolicyMapWallet
from ...psbt import PSBT
from ..._serialize import deser_string
def parse_stream_to_map(f: BufferedReader) -> Mapping[bytes, bytes]:
result = {}
while True:
try:
key = deser_string(f)
except Exception:
break
# Check for separator
if len(key) == 0:
break
value = deser_string(f)
result[key] = value
return result
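# Example (assuming deser_string reads a compact-size length prefix followed by
# the raw bytes, as in Bitcoin-style serialization):
#   parse_stream_to_map(BytesIO(b"\x01k\x03abc\x00")) -> {b"k": b"abc"}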
class NewClient(Client):
# internal use for testing: if set to True, sign_psbt will not clone the psbt before converting to psbt version 2
_no_clone_psbt: bool = False
def __init__(self, comm_client: TransportClient, chain: Chain = Chain.MAIN) -> None:
super().__init__(comm_client, chain)
self.builder = BitcoinCommandBuilder()
# Modifies the behavior of the base method by taking care of SW_INTERRUPTED_EXECUTION responses
def _make_request(
self, apdu: dict, client_intepreter: ClientCommandInterpreter = None
) -> Tuple[int, bytes]:
sw, response = self._apdu_exchange(apdu)
while sw == 0xE000:
if not client_intepreter:
raise RuntimeError("Unexpected SW_INTERRUPTED_EXECUTION received.")
command_response = client_intepreter.execute(response)
sw, response = self._apdu_exchange(
self.builder.continue_interrupted(command_response)
)
return sw, response
def get_extended_pubkey(self, path: str, display: bool = False) -> str:
sw, response = self._make_request(self.builder.get_extended_pubkey(path, display))
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=BitcoinInsType.GET_EXTENDED_PUBKEY)
return response.decode()
def register_wallet(self, wallet: Wallet) -> Tuple[bytes, bytes]:
if wallet.type != WalletType.POLICYMAP:
raise ValueError("wallet type must be POLICYMAP")
client_intepreter = ClientCommandInterpreter()
client_intepreter.add_known_preimage(wallet.serialize())
client_intepreter.add_known_list([k.encode() for k in wallet.keys_info])
sw, response = self._make_request(
self.builder.register_wallet(wallet), client_intepreter
)
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=BitcoinInsType.REGISTER_WALLET)
if len(response) != 64:
raise RuntimeError(f"Invalid response length: {len(response)}")
wallet_id = response[0:32]
wallet_hmac = response[32:64]
return wallet_id, wallet_hmac
def get_wallet_address(
self,
wallet: Wallet,
wallet_hmac: Optional[bytes],
change: int,
address_index: int,
display: bool,
) -> str:
if wallet.type != WalletType.POLICYMAP or not isinstance(
wallet, PolicyMapWallet
):
raise ValueError("wallet type must be POLICYMAP")
if change != 0 and change != 1:
raise ValueError("Invalid change")
client_intepreter = ClientCommandInterpreter()
client_intepreter.add_known_list([k.encode() for k in wallet.keys_info])
client_intepreter.add_known_preimage(wallet.serialize())
sw, response = self._make_request(
self.builder.get_wallet_address(
wallet, wallet_hmac, address_index, change, display
),
client_intepreter,
)
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=BitcoinInsType.GET_WALLET_ADDRESS)
return response.decode()
def sign_psbt(self, psbt: PSBT, wallet: Wallet, wallet_hmac: Optional[bytes]) -> Mapping[int, bytes]:
"""Signs a PSBT using a registered wallet (or a standard wallet that does not need registration).
Signature requires explicit approval from the user.
Parameters
----------
psbt : PSBT
            A PSBT of version 2, with all the information needed to sign its inputs already filled in; which
            fields are required changes depending on the type of input. This method asserts that the PSBT is
            already version 2, so a version-0 PSBT must be converted before calling it.
            The non-witness UTXO must be present for both legacy and SegWit inputs, or the hardware wallet will reject
            signing (this will change for Taproot inputs).
wallet : Wallet
The registered wallet policy, or a standard wallet policy.
wallet_hmac: Optional[bytes]
For a registered wallet, the hmac obtained at wallet registration. `None` for a standard wallet policy.
Returns
-------
Mapping[int, bytes]
A mapping that has as keys the indexes of inputs that the Hardware Wallet signed, and the corresponding signatures as values.
"""
assert psbt.version == 2
psbt_v2 = psbt
psbt_bytes = base64.b64decode(psbt_v2.serialize())
f = BytesIO(psbt_bytes)
# We parse the individual maps (global map, each input map, and each output map) from the psbt serialized as a
# sequence of bytes, in order to produce the serialized Merkleized map commitments. Moreover, we prepare the
# client interpreter to respond on queries on all the relevant Merkle trees and pre-images in the psbt.
assert f.read(5) == b"psbt\xff"
client_intepreter = ClientCommandInterpreter()
client_intepreter.add_known_list([k.encode() for k in wallet.keys_info])
client_intepreter.add_known_preimage(wallet.serialize())
global_map: Mapping[bytes, bytes] = parse_stream_to_map(f)
client_intepreter.add_known_mapping(global_map)
input_maps: List[Mapping[bytes, bytes]] = []
for _ in range(len(psbt_v2.inputs)):
input_maps.append(parse_stream_to_map(f))
for m in input_maps:
client_intepreter.add_known_mapping(m)
output_maps: List[Mapping[bytes, bytes]] = []
for _ in range(len(psbt_v2.outputs)):
output_maps.append(parse_stream_to_map(f))
for m in output_maps:
client_intepreter.add_known_mapping(m)
# We also add the Merkle tree of the input (resp. output) map commitments as a known tree
input_commitments = [get_merkleized_map_commitment(m_in) for m_in in input_maps]
output_commitments = [get_merkleized_map_commitment(m_out) for m_out in output_maps]
client_intepreter.add_known_list(input_commitments)
client_intepreter.add_known_list(output_commitments)
sw, _ = self._make_request(
self.builder.sign_psbt(
global_map, input_maps, output_maps, wallet, wallet_hmac
),
client_intepreter,
)
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=BitcoinInsType.SIGN_PSBT)
# parse results and return a structured version instead
results = client_intepreter.yielded
if any(len(x) <= 1 for x in results):
raise RuntimeError("Invalid response")
return {int(res[0]): res[1:] for res in results}
def get_master_fingerprint(self) -> bytes:
sw, response = self._make_request(self.builder.get_master_fingerprint())
if sw != 0x9000:
raise DeviceException(error_code=sw, ins=BitcoinInsType.GET_EXTENDED_PUBKEY)
return response
def createClient(comm_client: Optional[TransportClient] = None, chain: Chain = Chain.MAIN, debug: bool = False) -> Union[LegacyClient, NewClient]:
if comm_client is None:
comm_client = TransportClient("hid", debug=debug)
base_client = Client(comm_client, chain)
app_name, app_version, _ = base_client.get_version()
if app_name not in ["Bitcoin", "Bitcoin Test", "app"]:
raise NotSupportedError(0x6A82, None, "Ledger is not in either the Bitcoin or Bitcoin Testnet app")
if app_version >= "2":
return NewClient(comm_client, chain)
else:
return LegacyClient(comm_client, chain)
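# --- Hedged usage sketch (not part of the original module) ---
# A minimal illustration of how createClient, wallet policies, and sign_psbt are
# meant to fit together, assuming a Ledger device is reachable over HID. The
# derivation path, the "wpkh(@0)" policy string, the key-info format, and the
# PolicyMapWallet constructor arguments are assumptions for illustration only.
if __name__ == "__main__":
    client = createClient(chain=Chain.TEST, debug=True)
    fingerprint = client.get_master_fingerprint()
    xpub = client.get_extended_pubkey("m/84'/1'/0'", display=False)
    # A standard single-signature segwit policy; standard policies need no
    # registration, so wallet_hmac stays None below.
    wallet = PolicyMapWallet(
        name="",
        policy_map="wpkh(@0)",
        keys_info=[f"[{fingerprint.hex()}/84'/1'/0']{xpub}/**"],
    )
    address = client.get_wallet_address(wallet, None, change=0, address_index=0, display=False)
    print("first receive address:", address)
    # sign_psbt above asserts the PSBT is already version 2, so a version-0 PSBT
    # would have to be converted first; psbt_b64 is a placeholder value.
    psbt_b64 = "..."  # placeholder: a base64-encoded, fully populated PSBT
    psbt = PSBT()
    psbt.deserialize(psbt_b64)
    signatures = client.sign_psbt(psbt, wallet, wallet_hmac=None)
    for input_index, sig in signatures.items():
        print(input_index, sig.hex())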
|
462134
|
import ugfx
period = 1 * 1000
needs_icon = True
i = 0
def tick(icon):
global i
i += 1
icon.show()
ugfx.set_default_font("c*")
icon.area(0, 0, icon.width(), icon.height(), 0xFFFF)
icon.text(0, 0, str(i), 0)
return "Test: %d"% i
|
462135
|
from app import add
i = 0
while True:
add.delay(4, 5)
add.delay(10, 20)
add.delay(100, 20)
i += 1
if i == 10000:
break
|
462157
|
from unittest import TestCase
import numpy as np
from cmlkit.utility.indices import *
class TestFourwaySplit(TestCase):
def setUp(self):
self.n = 40
self.k_train = 7
self.k_test = 5
self.k_valid = 15
self.a, self.b, self.c, self.d = fourway_split(
self.n, self.k_train, self.k_test, self.k_valid
)
def test_sizes(self):
self.assertEqual(
[len(self.a), len(self.b), len(self.c), len(self.d)],
[40 - 5 - 15 - 7, 7, 5, 15],
)
def test_union_is_all(self):
union = np.union1d(self.a, self.b)
union = np.union1d(union, self.c)
union = np.union1d(union, self.d)
self.assertEqual(union.all(), np.array(np.arange(self.n)).all())
class TestTwowaySplit(TestCase):
def setUp(self):
self.n = 40
self.k_test = 10
self.a, self.b = twoway_split(self.n, self.k_test)
def test_sizes(self):
self.assertEqual([len(self.a), len(self.b)], [40 - 10, 10])
def test_union_is_all(self):
union = np.union1d(self.a, self.b)
self.assertEqual(union.all(), np.array(np.arange(self.n)).all())
class TestThreewaySplit(TestCase):
def setUp(self):
self.n = 40
self.k_test = 5
self.k_valid = 15
self.a, self.b, self.c = threeway_split(self.n, self.k_test, self.k_valid)
def test_sizes(self):
self.assertEqual([len(self.a), len(self.b), len(self.c)], [40 - 5 - 15, 5, 15])
def test_union_is_all(self):
union = np.union1d(self.a, self.b)
union = np.union1d(union, self.c)
self.assertEqual(union.all(), np.array(np.arange(self.n)).all())
class TestGenerateIndices(TestCase):
def test_make_range_if_int(self):
ind = generate_indices(6, [])
self.assertEqual(ind.all(), np.arange(6).all())
def test_pass_through_index_array(self):
ind = generate_indices(np.arange(6), [])
self.assertEqual(ind.all(), np.arange(6).all())
def test_exclude(self):
ind = generate_indices(6, [3])
self.assertFalse(3 in ind)
class TestGenerateDistinctSets(TestCase):
def test_element_numbers_sum_up(self):
n = 78
full = np.arange(n)
k = 64
a, b = generate_distinct_sets(full, k)
self.assertEqual(len(a) + len(b), n)
def test_union_is_all(self):
n = 78
full = np.arange(n)
k = 3
a, b = generate_distinct_sets(full, k)
self.assertEqual(np.union1d(a, b).all(), np.array(full).all())
def test_set_disjunct(self):
n = 78
full = np.arange(n)
k = 3
a, b = generate_distinct_sets(full, k)
self.assertTrue(np.intersect1d(a, b).size == 0)
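# --- Hedged usage sketch (not part of the original test module) ---
# Summarises what the tests above exercise, assuming the return order shown in
# TestFourwaySplit: the remaining indices come first, then the train, test, and
# validation subsets.
if __name__ == "__main__":
    rest, train, test, valid = fourway_split(40, 7, 5, 15)
    print(len(rest), len(train), len(test), len(valid))  # expected: 13 7 5 15
    held_in, held_out = twoway_split(40, 10)
    print(len(held_in), len(held_out))                   # expected: 30 10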
|
462184
|
from pathlib import Path
import pytest
import spacy
import tomli
from src.spacy_html_tokenizer import __version__, create_html_tokenizer
def test_version():
version = "0.1.3"
pyproject = tomli.loads(Path("pyproject.toml").read_text())
assert pyproject["tool"]["poetry"]["version"] == version
assert __version__ == version
@pytest.fixture
def tokenizer():
nlp = spacy.blank("en")
tokenizer = create_html_tokenizer()(nlp)
return tokenizer
@pytest.fixture
def pipeline():
nlp = spacy.load("en_core_web_sm", disable=["tagger", "ner", "lemmatizer"])
nlp.tokenizer = create_html_tokenizer()(nlp)
return nlp
@pytest.fixture
def html1():
html = """<h2>An Ordered HTML List</h2>
<ol>
<li><b>Good</b> Coffee. There's another sentence here</li>
<li>Tea and honey</li>
<li>Milk</li>
</ol>"""
return html
@pytest.fixture
def html2():
html = """
<body>
<span id="vspan"></span>
<h1>Welcome to selectolax tutorial</h1>
<div id="text">
<p class='p3' style='display:none;'>Excepteur <i>sint</i> occaecat cupidatat non proident</p>
<p class='p3' vid>Lorem ipsum</p>
</div>
<div>
<p id='stext'>Lorem ipsum dolor sit amet, ea quo modus meliore platonem.</p>
</div>
</body>
"""
return html
@pytest.fixture
def html_with_script():
html = """<h2>An Ordered HTML List</h2>
<script>pretend some javascript is here</script>"""
return html
def test_html1(tokenizer, html1):
sent_starts = [0, 4, 12, 15]
doc = tokenizer(html1)
assert len(doc) == 16
assert len(list(doc.sents)) == 4
for i, token in enumerate(doc):
if i in sent_starts:
assert token.is_sent_start
if i not in sent_starts:
assert token.is_sent_start is None
def test_html2(tokenizer, html2):
sent_starts = [0, 4, 10, 12]
doc = tokenizer(html2)
assert len(doc) == 24
assert len(list(doc.sents)) == 4
for i, token in enumerate(doc):
if i in sent_starts:
assert token.is_sent_start
if i not in sent_starts:
assert token.is_sent_start is None
def test_html1_with_sentence_boundary(pipeline, html1):
doc = pipeline(html1)
# We include the dependency parser in this one to check that
# additional sentence boundary detection still works
assert len(list(doc.sents)) == 5
def test_html_with_script(pipeline, html_with_script):
doc = pipeline(html_with_script)
assert len(list(doc.sents)) == 1
|
462196
|
import numpy as np
class NNet():
"""
Class that represents a fully connected ReLU network from a .nnet file
Args:
filename (str): A .nnet file to load
Attributes:
numLayers (int): Number of weight matrices or bias vectors in neural network
layerSizes (list of ints): Size of input layer, hidden layers, and output layer
inputSize (int): Size of input
outputSize (int): Size of output
mins (list of floats): Minimum values of inputs
maxes (list of floats): Maximum values of inputs
means (list of floats): Means of inputs and mean of outputs
ranges (list of floats): Ranges of inputs and range of outputs
weights (list of numpy arrays): Weight matrices in network
biases (list of numpy arrays): Bias vectors in network
"""
    def __init__(self, filename):
with open(filename) as f:
line = f.readline()
cnt = 1
while line[0:2] == "//":
line=f.readline()
cnt+= 1
            # numLayers doesn't include the input layer!
numLayers, inputSize, outputSize, _ = [int(x) for x in line.strip().split(",")[:-1]]
line=f.readline()
#input layer size, layer1size, layer2size...
layerSizes = [int(x) for x in line.strip().split(",")[:-1]]
line=f.readline()
symmetric = int(line.strip().split(",")[0])
line = f.readline()
inputMinimums = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
inputMaximums = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
inputMeans = [float(x) for x in line.strip().split(",")[:-1]]
line = f.readline()
inputRanges = [float(x) for x in line.strip().split(",")[:-1]]
weights=[]
biases = []
for layernum in range(numLayers):
previousLayerSize = layerSizes[layernum]
currentLayerSize = layerSizes[layernum+1]
weights.append([])
biases.append([])
weights[layernum] = np.zeros((currentLayerSize,previousLayerSize))
for i in range(currentLayerSize):
line=f.readline()
aux = [float(x) for x in line.strip().split(",")[:-1]]
for j in range(previousLayerSize):
weights[layernum][i,j] = aux[j]
#biases
biases[layernum] = np.zeros(currentLayerSize)
for i in range(currentLayerSize):
line=f.readline()
x = float(line.strip().split(",")[0])
biases[layernum][i] = x
self.numLayers = numLayers
self.layerSizes = layerSizes
self.inputSize = inputSize
self.outputSize = outputSize
self.mins = inputMinimums
self.maxes = inputMaximums
self.means = inputMeans
self.ranges = inputRanges
self.weights = weights
self.biases = biases
def evaluate_network(self, inputs):
'''
Evaluate network using given inputs
Args:
inputs (numpy array of floats): Network inputs to be evaluated
Returns:
(numpy array of floats): Network output
'''
numLayers = self.numLayers
inputSize = self.inputSize
outputSize = self.outputSize
biases = self.biases
weights = self.weights
# Prepare the inputs to the neural network
inputsNorm = np.zeros(inputSize)
for i in range(inputSize):
if inputs[i]<self.mins[i]:
inputsNorm[i] = (self.mins[i]-self.means[i])/self.ranges[i]
elif inputs[i]>self.maxes[i]:
inputsNorm[i] = (self.maxes[i]-self.means[i])/self.ranges[i]
else:
inputsNorm[i] = (inputs[i]-self.means[i])/self.ranges[i]
# Evaluate the neural network
for layer in range(numLayers-1):
inputsNorm = np.maximum(np.dot(weights[layer],inputsNorm)+biases[layer],0)
outputs = np.dot(weights[-1],inputsNorm)+biases[-1]
# Undo output normalization
for i in range(outputSize):
outputs[i] = outputs[i]*self.ranges[-1]+self.means[-1]
return outputs
def evaluate_network_multiple(self,inputs):
'''
Evaluate network using multiple sets of inputs
Args:
inputs (numpy array of floats): Array of network inputs to be evaluated.
Returns:
(numpy array of floats): Network outputs for each set of inputs
'''
numLayers = self.numLayers
inputSize = self.inputSize
outputSize = self.outputSize
biases = self.biases
weights = self.weights
inputs = np.array(inputs).T
# Prepare the inputs to the neural network
numInputs = inputs.shape[1]
inputsNorm = np.zeros((inputSize,numInputs))
for i in range(inputSize):
for j in range(numInputs):
if inputs[i,j]<self.mins[i]:
inputsNorm[i,j] = (self.mins[i]-self.means[i])/self.ranges[i]
elif inputs[i,j] > self.maxes[i]:
inputsNorm[i,j] = (self.maxes[i]-self.means[i])/self.ranges[i]
else:
inputsNorm[i,j] = (inputs[i,j]-self.means[i])/self.ranges[i]
# Evaluate the neural network
for layer in range(numLayers-1):
inputsNorm = np.maximum(np.dot(weights[layer],inputsNorm)+biases[layer].reshape((len(biases[layer]),1)),0)
outputs = np.dot(weights[-1],inputsNorm)+biases[-1].reshape((len(biases[-1]),1))
# Undo output normalization
for i in range(outputSize):
for j in range(numInputs):
outputs[i,j] = outputs[i,j]*self.ranges[-1]+self.means[-1]
return outputs.T
def num_inputs(self):
''' Get network input size'''
return self.inputSize
def num_outputs(self):
''' Get network output size'''
return self.outputSize
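# --- Hedged usage sketch (not part of the original module) ---
# Minimal example of loading and evaluating a network with the class above;
# "my_network.nnet" is a placeholder path, not a file that ships with this code.
if __name__ == "__main__":
    net = NNet("my_network.nnet")
    print("inputs:", net.num_inputs(), "outputs:", net.num_outputs())
    single_input = np.zeros(net.num_inputs())      # one sample, evaluated directly
    print(net.evaluate_network(single_input))
    batch = np.zeros((3, net.num_inputs()))        # three samples, evaluated together
    print(net.evaluate_network_multiple(batch))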
|
462208
|
from typing import NamedTuple, List
from pokeai.ai.battle_status import BattleStatus
from pokeai.ai.common import PossibleAction
class RLPolicyObservation(NamedTuple):
battle_status: BattleStatus
request: dict
possible_actions: List[PossibleAction]
|
462244
|
def greet(name):
print(f'Hello, {name}!')
def test_prints(capsys):
# call the function
greet('Escape School 2021')
# test that it wrote what we expect to stdout
captured = capsys.readouterr()
# .err would be the stderr output
assert captured.out == 'Hello, Escape School 2021!\n'
|
462255
|
import fastai
from fastai import *
from fastai.core import *
from fastai.vision.transform import get_transforms
from fastai.vision.data import ImageImageList, ImageDataBunch, imagenet_stats
from .augs import noisify
def get_colorize_data(
sz: int,
bs: int,
crappy_path: Path,
good_path: Path,
random_seed: int = None,
keep_pct: float = 1.0,
num_workers: int = 8,
stats: tuple = imagenet_stats,
xtra_tfms=[],
) -> ImageDataBunch:
src = (
ImageImageList.from_folder(crappy_path, convert_mode='RGB')
.use_partial_data(sample_pct=keep_pct, seed=random_seed)
.split_by_rand_pct(0.1, seed=random_seed)
)
data = (
src.label_from_func(lambda x: good_path / x.relative_to(crappy_path))
.transform(
get_transforms(
max_zoom=1.2, max_lighting=0.5, max_warp=0.25, xtra_tfms=xtra_tfms
),
size=sz,
tfm_y=True,
)
.databunch(bs=bs, num_workers=num_workers, no_check=True)
.normalize(stats, do_y=True)
)
data.c = 3
return data
def get_dummy_databunch() -> ImageDataBunch:
path = Path('./dummy/')
return get_colorize_data(
sz=1, bs=1, crappy_path=path, good_path=path, keep_pct=0.001
)
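# --- Hedged usage sketch (not part of the original module) ---
# Typical call with paired low-quality ("crappy") and high-quality image folders;
# the folder paths, image size, and batch size below are placeholders, not
# values defined by this project.
if __name__ == "__main__":
    data = get_colorize_data(
        sz=64,
        bs=8,
        crappy_path=Path('./data/crappy/'),
        good_path=Path('./data/good/'),
        random_seed=42,
        keep_pct=0.5,
    )
    print(data)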
|
462260
|
import numpy as np
import gzip
import pickle
import os
class MNIST:
def __init__(self, batch_size):
self.batch_size = batch_size
train, valid, test = self._load_data()
self.X_train, self.y_train = train[0], train[1]
# encoding y_train using one-hot encoding
self.y_train_one_hot = np.zeros((self.y_train.shape[0], 10))
self.y_train_one_hot[np.arange(self.y_train.shape[0]), self.y_train] = 1
self.X_valid, self.y_valid = valid[0], valid[1]
self.X_test, self.y_test = test[0], test[1]
def train_batch_generator(self):
while True:
rand_indices = np.random.choice(self.X_train.shape[0], self.batch_size, False)
yield self.X_train[rand_indices], self.y_train_one_hot[rand_indices]
def validation(self):
return self.X_valid, self.y_valid
def testing(self):
return self.X_test, self.y_test
def num_features(self):
return self.X_train.shape[1]
def _load_data(self):
script_dir = os.path.dirname(__file__)
mnist_file = os.path.join(os.path.join(script_dir, 'data'), 'mnist.pkl.gz')
with gzip.open(mnist_file, 'rb') as mnist_file:
u = pickle._Unpickler(mnist_file)
u.encoding = 'latin1'
train, val, test = u.load()
return train, val, test
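# --- Hedged usage sketch (not part of the original module) ---
# Shows how the batch generator above is meant to be consumed; it assumes
# data/mnist.pkl.gz exists next to this module, which is what _load_data expects.
if __name__ == "__main__":
    mnist = MNIST(batch_size=32)
    batches = mnist.train_batch_generator()
    X_batch, y_batch = next(batches)           # y_batch is one-hot encoded
    print(X_batch.shape, y_batch.shape)        # e.g. (32, 784) and (32, 10)
    X_valid, y_valid = mnist.validation()
    print(X_valid.shape, y_valid.shape)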
|