ext | sha | content
---|---|---
py | 1a59d6bfe21e9de66627eaa1176c211702795d30 |
from cryptography.fernet import Fernet

def read_key():
    # Read the symmetric key previously written by Fernet.generate_key().
    with open('key.key', 'rb') as key_file:
        return key_file.read()

def encrypt(data):
    # Encrypt a plaintext string and return the ciphertext as a string.
    key = read_key()
    f = Fernet(key)
    encrypted = f.encrypt(data.encode())
    return encrypted.decode()

def decrypt(data):
    # Decrypt a ciphertext string produced by encrypt() and return the plaintext.
    key = read_key()
    f = Fernet(key)
    decrypted = f.decrypt(data.encode())
    return decrypted.decode()
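
# Example usage (a minimal sketch; assumes the key file has not been created yet
# and matches the 'key.key' name used by read_key() above):
#
#     from cryptography.fernet import Fernet
#     with open('key.key', 'wb') as key_file:
#         key_file.write(Fernet.generate_key())
#
#     token = encrypt('hello world')   # base64-encoded ciphertext string
#     assert decrypt(token) == 'hello world'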
|
py | 1a59d71687862b0f4e4d82a439f6b407e398384e |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: an implementation of a deep learning recommendation model (DLRM)
# The model input consists of dense and sparse features. The former is a vector
# of floating point values. The latter is a list of sparse indices into
# embedding tables, which consist of vectors of floating point values.
# The selected vectors are passed to mlp networks denoted by triangles;
# in some cases the vectors are interacted through operators (Ops).
#
# output:
# vector of values
# model: |
# /\
# /__\
# |
# _____________________> Op <___________________
# / | \
# /\ /\ /\
# /__\ /__\ ... /__\
# | | |
# | Op Op
# | ____/__\_____ ____/__\____
# | |_Emb_|____|__| ... |_Emb_|__|___|
# input:
# [ dense features ] [sparse indices] , ..., [sparse indices]
#
# More precise definition of model layers:
# 1) fully connected layers of an mlp
# z = f(y)
# y = Wx + b
#
# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])
# z = Op(e1,...,ek)
# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]
#
# 3) Operator Op can be one of the following
# Sum(e1,...,ek) = e1 + ... + ek
# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]
# Cat(e1,...,ek) = [e1', ..., ek']'
# where ' denotes transpose operation
#
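# Worked example (illustrative only, not part of the model): for two embedding
# vectors e1 = [1, 2] and e2 = [3, 4],
#   Sum(e1, e2) = [4, 6]
#   Dot(e1, e2) = [e1'e1, e1'e2, e2'e1, e2'e2] = [5, 11, 11, 25]
#   Cat(e1, e2) = [1, 2, 3, 4]
# (for the "dot" interaction the model keeps only the unique off-diagonal entries
# by default; see interact_features below).
#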
# References:
# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang,
# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu,
# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii,
# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko,
# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong,
# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and
# Recommendation Systems", CoRR, arXiv:1906.00091, 2019
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
# miscellaneous
import builtins
import datetime
import json
import sys
import time
# from ppuda.ghn.nn import GHN2
# onnx
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
# from ppuda.utils.utils import adjust_net
# data generation
import dlrm_data_pytorch as dp
# For distributed run
import extend_distributed as ext_dist
import mlperf_logger
# numpy
import numpy as np
import sklearn.metrics
# pytorch
import torch
import torch.nn as nn
from torch._ops import ops
from torch.autograd.profiler import record_function
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
from torch.nn.parameter import Parameter
from torch.optim.lr_scheduler import _LRScheduler
import optim.rwsadagrad as RowWiseSparseAdagrad
from torch.utils.tensorboard import SummaryWriter
# mixed-dimension trick
from tricks.md_embedding_bag import PrEmbeddingBag, md_solver
# quotient-remainder trick
from tricks.qr_embedding_bag import QREmbeddingBag
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
import onnx
except ImportError as error:
print("Unable to import onnx. ", error)
# from torchviz import make_dot
# import torch.nn.functional as Functional
# from torch.nn.parameter import Parameter
exc = getattr(builtins, "IOError", "FileNotFoundError")
def time_wrap(use_gpu):
if use_gpu:
torch.cuda.synchronize()
return time.time()
def dlrm_wrap(X, lS_o, lS_i, use_gpu, device, ndevices=1):
with record_function("DLRM forward"):
if use_gpu: # .cuda()
# lS_i can be either a list of tensors or a stacked tensor.
# Handle each case below:
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return dlrm(X.to(device), lS_o, lS_i)
def loss_fn_wrap(Z, T, use_gpu, device):
with record_function("DLRM loss compute"):
if args.loss_function == "mse" or args.loss_function == "bce":
return dlrm.loss_fn(Z, T.to(device))
elif args.loss_function == "wbce":
loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
loss_fn_ = dlrm.loss_fn(Z, T.to(device))
loss_sc_ = loss_ws_ * loss_fn_
return loss_sc_.mean()
# The following function is a wrapper to avoid checking this multiple times in the
# loop below.
def unpack_batch(b):
# Experiment with unweighted samples
return b[0], b[1], b[2], b[3], torch.ones(b[3].size()), None
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = decay_start_step + num_decay_steps
self.num_decay_steps = num_decay_steps
if self.decay_start_step < self.num_warmup_steps:
sys.exit("Learning rate warmup must finish before the decay starts")
super(LRPolicyScheduler, self).__init__(optimizer)
def get_lr(self):
step_count = self._step_count
if step_count < self.num_warmup_steps:
# warmup
scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
lr = [base_lr * scale for base_lr in self.base_lrs]
self.last_lr = lr
elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
# decay
decayed_steps = step_count - self.decay_start_step
scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
min_lr = 0.0000001
lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
self.last_lr = lr
else:
if self.num_decay_steps > 0:
# freeze at last, either because we're after decay
# or because we're between warmup and decay
lr = self.last_lr
else:
# do not adjust
lr = self.base_lrs
return lr
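# Schedule shape (a sketch using hypothetical settings base_lr=0.1, num_warmup_steps=4,
# decay_start_step=10, num_decay_steps=5): steps 1-3 warm up linearly
# (0.025, 0.05, 0.075), steps 4-9 hold the last warmup value, steps 10-14 decay
# polynomially with power 2 from base_lr (0.1, 0.064, 0.036, 0.016, 0.004), and from
# step 15 on the learning rate is frozen at its last value.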
### define dlrm in PyTorch ###
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W, requires_grad=True)
LL.bias.data = torch.tensor(bt, requires_grad=True)
# approach 2
# LL.weight.data.copy_(torch.tensor(W))
# LL.bias.data.copy_(torch.tensor(bt))
# approach 3
# LL.weight = Parameter(torch.tensor(W),requires_grad=True)
# LL.bias = Parameter(torch.tensor(bt),requires_grad=True)
layers.append(LL)
# construct sigmoid or relu operator
if i == sigmoid_layer:
layers.append(nn.Sigmoid())
else:
layers.append(nn.ReLU())
# approach 1: use ModuleList
# return layers
# approach 2: use Sequential container to wrap all layers
return torch.nn.Sequential(*layers)
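# For example (illustrative only): create_mlp(np.array([4, 3, 2]), sigmoid_layer=1)
# builds Sequential(Linear(4, 3), ReLU(), Linear(3, 2), Sigmoid()); a sigmoid_layer
# index that never matches (e.g. -1) yields ReLU after every layer.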
def create_emb(self, m, ln, weighted_pooling=None):
emb_l = nn.ModuleList()
v_W_l = []
for i in range(0, ln.size):
if ext_dist.my_size > 1:
if i not in self.local_emb_indices:
continue
n = ln[i]
# construct embedding operator
if self.qr_flag and n > self.qr_threshold:
EE = QREmbeddingBag(
n,
m,
self.qr_collisions,
operation=self.qr_operation,
mode="sum",
sparse=True,
)
elif self.md_flag and n > self.md_threshold:
base = max(m)
_m = m[i] if n > self.md_threshold else base
EE = PrEmbeddingBag(n, _m, base)
# use np initialization as below for consistency...
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)
).astype(np.float32)
EE.embs.weight.data = torch.tensor(W, requires_grad=True)
else:
EE = nn.EmbeddingBag(n, m, mode="sum", sparse=False)
# initialize embeddings
# nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)
).astype(np.float32)
# approach 1
EE.weight.data = torch.tensor(W, requires_grad=True)
# approach 2
# EE.weight.data.copy_(torch.tensor(W))
# approach 3
# EE.weight = Parameter(torch.tensor(W),requires_grad=True)
if weighted_pooling is None:
v_W_l.append(None)
else:
v_W_l.append(torch.ones(n, dtype=torch.float32))
emb_l.append(EE)
return emb_l, v_W_l
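# For example (illustrative only): with m=16 and ln=[1000, 5000] and no QR/MD tricks,
# create_emb builds two EmbeddingBag(n, 16, mode="sum") tables initialized uniformly
# in [-sqrt(1/n), sqrt(1/n)]; when weighted_pooling is set, v_W_l holds one weight
# vector of length n per table (all ones initially), used later as per_sample_weights.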
def __init__(
self,
m_spa=None,
ln_emb=None,
ln_bot=None,
ln_top=None,
arch_interaction_op=None,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation="mult",
qr_collisions=0,
qr_threshold=200,
md_flag=False,
md_threshold=200,
weighted_pooling=None,
loss_function="bce"
):
super(DLRM_Net, self).__init__()
if (
(m_spa is not None)
and (ln_emb is not None)
and (ln_bot is not None)
and (ln_top is not None)
and (arch_interaction_op is not None)
):
# save arguments
self.ndevices = ndevices
self.output_d = 0
self.parallel_model_batch_size = -1
self.parallel_model_is_not_prepared = True
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sync_dense_params = sync_dense_params
self.loss_threshold = loss_threshold
self.loss_function=loss_function
if weighted_pooling is not None and weighted_pooling != "fixed":
self.weighted_pooling = "learned"
else:
self.weighted_pooling = weighted_pooling
# create variables for QR embedding if applicable
self.qr_flag = qr_flag
if self.qr_flag:
self.qr_collisions = qr_collisions
self.qr_operation = qr_operation
self.qr_threshold = qr_threshold
# create variables for MD embedding if applicable
self.md_flag = md_flag
if self.md_flag:
self.md_threshold = md_threshold
# If running distributed, get local slice of embedding tables
if ext_dist.my_size > 1:
n_emb = len(ln_emb)
if n_emb < ext_dist.my_size:
sys.exit(
"only (%d) sparse features for (%d) devices, table partitions will fail"
% (n_emb, ext_dist.my_size)
)
self.n_global_emb = n_emb
self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths(
n_emb
)
self.local_emb_slice = ext_dist.get_my_slice(n_emb)
self.local_emb_indices = list(range(n_emb))[self.local_emb_slice]
# create operators
if ndevices <= 1:
self.emb_l, w_list = self.create_emb(m_spa, ln_emb, weighted_pooling)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList()
for w in w_list:
self.v_W_l.append(Parameter(w))
else:
self.v_W_l = w_list
self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)
self.top_l = self.create_mlp(ln_top, sigmoid_top)
# quantization
self.quantize_emb = False
self.emb_l_q = []
self.quantize_bits = 32
# specify the loss function
if self.loss_function == "mse":
self.loss_fn = torch.nn.MSELoss(reduction="mean")
elif self.loss_function == "bce":
self.loss_fn = torch.nn.BCELoss(reduction="mean")
elif self.loss_function == "wbce":
self.loss_ws = torch.tensor(
np.fromstring(args.loss_weights, dtype=float, sep="-")
)
self.loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit(
"ERROR: --loss-function=" + self.loss_function + " is not supported"
)
def apply_mlp(self, x, layers):
# approach 1: use ModuleList
# for layer in layers:
# x = layer(x)
# return x
# approach 2: use Sequential container to wrap all layers
return layers(x)
def apply_emb(self, lS_o, lS_i, emb_l, v_W_l):
# WARNING: notice that we are processing the batch at once. We implicitly
# assume that the data is laid out such that:
# 1. each embedding is indexed with a group of sparse indices,
# corresponding to a single lookup
# 2. for each embedding the lookups are further organized into a batch
# 3. for a list of embedding tables there is a list of batched lookups
ly = []
for k, sparse_index_group_batch in enumerate(lS_i):
sparse_offset_group_batch = lS_o[k]
# embedding lookup
# We are using EmbeddingBag, which implicitly uses sum operator.
# The embeddings are represented as tall matrices, with sum
# happening vertically across 0 axis, resulting in a row vector
# E = emb_l[k]
if v_W_l[k] is not None:
per_sample_weights = v_W_l[k].gather(0, sparse_index_group_batch)
else:
per_sample_weights = None
if self.quantize_emb:
s1 = self.emb_l_q[k].element_size() * self.emb_l_q[k].nelement()
s2 = self.emb_l_q[k].element_size() * self.emb_l_q[k].nelement()
print("quantized emb sizes:", s1, s2)
if self.quantize_bits == 4:
QV = ops.quantized.embedding_bag_4bit_rowwise_offsets(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
elif self.quantize_bits == 8:
QV = ops.quantized.embedding_bag_byte_rowwise_offsets(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(QV)
else:
E = emb_l[k]
V = E(
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(V)
# print(ly)
return ly
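# For example (illustrative only): for one table, lS_i[k] = [1, 4, 5, 4, 3, 2] with
# lS_o[k] = [0, 3, 5] describes a batch of 3 lookups -- rows {1, 4, 5}, rows {4, 3}
# and row {2} -- and EmbeddingBag sum-pools each group into a single row vector,
# so ly[k] has shape (batch_size, embedding_dim) = (3, m).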
# using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu
def quantize_embedding(self, bits):
n = len(self.emb_l)
self.emb_l_q = [None] * n
for k in range(n):
if bits == 4:
self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack(
self.emb_l[k].weight
)
elif bits == 8:
self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack(
self.emb_l[k].weight
)
else:
return
self.emb_l = None
self.quantize_emb = True
self.quantize_bits = bits
def interact_features(self, x, ly):
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
(batch_size, d) = x.shape
T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
# perform a dot product
Z = torch.bmm(T, torch.transpose(T, 1, 2))
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = Z.view((batch_size, -1))
# approach 2: unique
_, ni, nj = Z.shape
# approach 1: tril_indices
# offset = 0 if self.arch_interaction_itself else -1
# li, lj = torch.tril_indices(ni, nj, offset=offset)
# approach 2: custom
offset = 1 if self.arch_interaction_itself else 0
li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
Zflat = Z[:, li, lj]
# concatenate dense features and interactions
R = torch.cat([x] + [Zflat], dim=1)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
R = torch.cat([x] + ly, dim=1)
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.arch_interaction_op
+ " is not supported"
)
return R
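# Shape sketch (illustrative only): with batch size B, dense output x of shape (B, d)
# and ly a list of s embedding outputs each of shape (B, d), the "dot" branch stacks
# them into T of shape (B, s + 1, d), computes Z = T @ T^T of shape (B, s + 1, s + 1),
# keeps the (s + 1) * s / 2 strictly lower-triangular entries (plus the diagonal when
# arch_interaction_itself is set), and concatenates them with x, giving R of shape
# (B, d + (s + 1) * s / 2). The "cat" branch simply returns shape (B, (s + 1) * d).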
def forward(self, dense_x, lS_o, lS_i):
if ext_dist.my_size > 1:
# multi-node multi-device run
return self.distributed_forward(dense_x, lS_o, lS_i)
elif self.ndevices <= 1:
# single device run
return self.sequential_forward(dense_x, lS_o, lS_i)
else:
# single-node multi-device run
return self.parallel_forward(dense_x, lS_o, lS_i)
def distributed_forward(self, dense_x, lS_o, lS_i):
batch_size = dense_x.size()[0]
# WARNING: # of ranks must be <= batch size in distributed_forward call
if batch_size < ext_dist.my_size:
sys.exit(
"ERROR: batch_size (%d) must be larger than number of ranks (%d)"
% (batch_size, ext_dist.my_size)
)
if batch_size % ext_dist.my_size != 0:
sys.exit(
"ERROR: batch_size %d can not split across %d ranks evenly"
% (batch_size, ext_dist.my_size)
)
dense_x = dense_x[ext_dist.get_my_slice(batch_size)]
lS_o = lS_o[self.local_emb_slice]
lS_i = lS_i[self.local_emb_slice]
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit(
"ERROR: corrupted model input detected in distributed_forward call"
)
# embeddings
with record_function("DLRM embedding forward"):
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each rank. We would like to obtain partial results
# corresponding to all embedding lookups, but only part of the batch on each rank.
# Therefore we redistribute them to match the distribution of the bottom mlp output,
# so that both can be used for subsequent interactions on each rank.
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in distributed_forward call")
a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank)
with record_function("DLRM bottom nlp forward"):
x = self.apply_mlp(dense_x, self.bot_l)
ly = a2a_req.wait()
ly = list(ly)
# interactions
with record_function("DLRM interaction forward"):
z = self.interact_features(x, ly)
# top mlp
with record_function("DLRM top nlp forward"):
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def sequential_forward(self, dense_x, lS_o, lS_i):
# process dense features (using bottom mlp), resulting in a row vector
x = self.apply_mlp(dense_x, self.bot_l)
# debug prints
# print("intermediate")
# print(x.detach().cpu().numpy())
# process sparse features(using embeddings), resulting in a list of row vectors
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# for y in ly:
# print(y.detach().cpu().numpy())
# interact features (dense and sparse)
z = self.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# obtain probability of a click (using top mlp)
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def parallel_forward(self, dense_x, lS_o, lS_i):
### prepare model (overwrite) ###
# WARNING: # of devices used must be <= batch size in parallel_forward call
batch_size = dense_x.size()[0]
ndevices = min(self.ndevices, batch_size, len(self.emb_l))
device_ids = range(ndevices)
# WARNING: must redistribute the model if the mini-batch size changes (this is common
# for the last mini-batch, when the dataset size is not evenly divisible by the batch size)
if self.parallel_model_batch_size != batch_size:
self.parallel_model_is_not_prepared = True
if self.parallel_model_is_not_prepared or self.sync_dense_params:
# replicate mlp (data parallelism)
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
self.parallel_model_batch_size = batch_size
if self.parallel_model_is_not_prepared:
# distribute embeddings (model parallelism)
t_list = []
w_list = []
for k, emb in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(emb.to(d))
if self.weighted_pooling == "learned":
w_list.append(Parameter(self.v_W_l[k].to(d)))
elif self.weighted_pooling == "fixed":
w_list.append(self.v_W_l[k].to(d))
else:
w_list.append(None)
self.emb_l = nn.ModuleList(t_list)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList(w_list)
else:
self.v_W_l = w_list
self.parallel_model_is_not_prepared = False
### prepare input (overwrite) ###
# scatter dense features (data parallelism)
# print(dense_x.device)
dense_x = scatter(dense_x, device_ids, dim=0)
# distribute sparse features (model parallelism)
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit("ERROR: corrupted model input detected in parallel_forward call")
t_list = []
i_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(lS_o[k].to(d))
i_list.append(lS_i[k].to(d))
lS_o = t_list
lS_i = i_list
### compute results in parallel ###
# bottom mlp
# WARNING: Note that the self.bot_l is a list of bottom mlp modules
# that have been replicated across devices, while dense_x is a tuple of dense
# inputs that has been scattered across devices on the first (batch) dimension.
# The output is a list of tensors scattered across devices according to the
# distribution of dense_x.
x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)
# debug prints
# print(x)
# embeddings
ly = self.apply_emb(lS_o, lS_i, self.emb_l, self.v_W_l)
# debug prints
# print(ly)
# butterfly shuffle (implemented inefficiently for now)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each device. We would like to obtain partial results
# corresponding to all embedding lookups, but only part of the batch on each device.
# Therefore we redistribute them to match the distribution of the bottom mlp output,
# so that both can be used for subsequent interactions on each device.
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in parallel_forward call")
t_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
y = scatter(ly[k], device_ids, dim=0)
t_list.append(y)
# adjust the list to be ordered per device
ly = list(map(lambda y: list(y), zip(*t_list)))
# debug prints
# print(ly)
# interactions
z = []
for k in range(ndevices):
zk = self.interact_features(x[k], ly[k])
z.append(zk)
# debug prints
# print(z)
# top mlp
# WARNING: Note that the self.top_l is a list of top mlp modules that
# have been replicated across devices, while z is a list of interaction results
# that by construction are scattered across devices on the first (batch) dim.
# The output is a list of tensors scattered across devices according to the
# distribution of z.
p = parallel_apply(self.top_l_replicas, z, None, device_ids)
### gather the distributed results ###
p0 = gather(p, self.output_d, dim=0)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z0 = torch.clamp(
p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)
)
else:
z0 = p0
return z0
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value
)
return value
def dash_separated_floats(value):
vals = value.split("-")
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value
)
return value
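# For example (illustrative only): "13-512-256-64" passes dash_separated_ints and is
# later parsed with np.fromstring(value, dtype=int, sep="-") into [13, 512, 256, 64];
# "1.0-2.5" passes dash_separated_floats. A value such as "4-x-2" raises
# argparse.ArgumentTypeError.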
def inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
log_iter=-1,
):
test_accu = 0
test_samp = 0
if args.mlperf_logging:
scores = []
targets = []
for i, testBatch in enumerate(test_ld):
# early exit if nbatches was set by the user and was exceeded
if nbatches > 0 and i >= nbatches:
break
X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch(
testBatch
)
# Skip the batch if batch size not multiple of total ranks
if ext_dist.my_size > 1 and X_test.size(0) % ext_dist.my_size != 0:
print("Warning: Skiping the batch %d with size %d" % (i, X_test.size(0)))
continue
# forward pass
Z_test = dlrm_wrap(
X_test,
lS_o_test,
lS_i_test,
use_gpu,
device,
ndevices=ndevices,
)
### gather the distributed results on each rank ###
# For some reason it requires explicit sync before all_gather call if
# tensor is on GPU memory
if Z_test.is_cuda:
torch.cuda.synchronize()
(_, batch_split_lengths) = ext_dist.get_split_lengths(X_test.size(0))
if ext_dist.my_size > 1:
Z_test = ext_dist.all_gather(Z_test, batch_split_lengths)
if args.mlperf_logging:
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
scores.append(S_test)
targets.append(T_test)
else:
with record_function("DLRM accuracy compute"):
# compute loss and accuracy
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
mbs_test = T_test.shape[0] # = mini_batch_size except last
A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8))
test_accu += A_test
test_samp += mbs_test
if args.mlperf_logging:
with record_function("DLRM mlperf sklearn metrics compute"):
scores = np.concatenate(scores, axis=0)
targets = np.concatenate(targets, axis=0)
metrics = {
"recall": lambda y_true, y_score: sklearn.metrics.recall_score(
y_true=y_true, y_pred=np.round(y_score)
),
"precision": lambda y_true, y_score: sklearn.metrics.precision_score(
y_true=y_true, y_pred=np.round(y_score)
),
"f1": lambda y_true, y_score: sklearn.metrics.f1_score(
y_true=y_true, y_pred=np.round(y_score)
),
"ap": sklearn.metrics.average_precision_score,
"roc_auc": sklearn.metrics.roc_auc_score,
"accuracy": lambda y_true, y_score: sklearn.metrics.accuracy_score(
y_true=y_true, y_pred=np.round(y_score)
),
}
validation_results = {}
for metric_name, metric_function in metrics.items():
validation_results[metric_name] = metric_function(targets, scores)
writer.add_scalar(
"mlperf-metrics-test/" + metric_name,
validation_results[metric_name],
log_iter,
)
acc_test = validation_results["accuracy"]
else:
acc_test = test_accu / test_samp
writer.add_scalar("Test/Acc", acc_test, log_iter)
model_metrics_dict = {
"nepochs": args.nepochs,
"nbatches": nbatches,
"nbatches_test": nbatches_test,
"state_dict": dlrm.state_dict(),
"test_acc": acc_test,
}
if args.mlperf_logging:
is_best = validation_results["roc_auc"] > best_auc_test
if is_best:
best_auc_test = validation_results["roc_auc"]
model_metrics_dict["test_auc"] = best_auc_test
print(
"recall {:.4f}, precision {:.4f},".format(
validation_results["recall"],
validation_results["precision"],
)
+ " f1 {:.4f}, ap {:.4f},".format(
validation_results["f1"], validation_results["ap"]
)
+ " auc {:.4f}, best auc {:.4f},".format(
validation_results["roc_auc"], best_auc_test
)
+ " accuracy {:3.3f} %, best accuracy {:3.3f} %".format(
validation_results["accuracy"] * 100, best_acc_test * 100
),
flush=True,
)
else:
is_best = acc_test > best_acc_test
if is_best:
best_acc_test = acc_test
print(
" accuracy {:3.3f} %, best {:3.3f} %".format(
acc_test * 100, best_acc_test * 100
),
flush=True,
)
return model_metrics_dict, is_best
def run():
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument(
"--arch-embedding-size", type=dash_separated_ints, default="4-3-2"
)
# j will be replaced with the table number
parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
parser.add_argument(
"--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot"
)
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
parser.add_argument("--weighted-pooling", type=str, default=None)
# embedding table options
parser.add_argument("--md-flag", action="store_true", default=False)
parser.add_argument("--md-threshold", type=int, default=200)
parser.add_argument("--md-temperature", type=float, default=0.3)
parser.add_argument("--md-round-dims", action="store_true", default=False)
parser.add_argument("--qr-flag", action="store_true", default=False)
parser.add_argument("--qr-threshold", type=int, default=200)
parser.add_argument("--qr-operation", type=str, default="mult")
parser.add_argument("--qr-collisions", type=int, default=4)
# activations and loss
parser.add_argument("--activation-function", type=str, default="relu")
parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce
parser.add_argument(
"--loss-weights", type=dash_separated_floats, default="1.0-1.0"
) # for wbce
parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7
parser.add_argument("--round-targets", type=bool, default=False)
# data
parser.add_argument("--data-size", type=int, default=1)
parser.add_argument("--num-batches", type=int, default=0)
parser.add_argument(
"--data-generation", type=str, default="random"
) # synthetic or dataset
parser.add_argument(
"--rand-data-dist", type=str, default="uniform"
) # uniform or gaussian
parser.add_argument("--rand-data-min", type=float, default=0)
parser.add_argument("--rand-data-max", type=float, default=1)
parser.add_argument("--rand-data-mu", type=float, default=-1)
parser.add_argument("--rand-data-sigma", type=float, default=1)
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--memory-map", action="store_true", default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--nepochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=0.01)
parser.add_argument("--print-precision", type=int, default=5)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--sync-dense-params", type=bool, default=True)
parser.add_argument("--optimizer", type=str, default="sgd")
parser.add_argument(
"--dataset-multiprocessing",
action="store_true",
default=False,
help="The Kaggle dataset can be multiprocessed in an environment \
with more than 7 CPU cores and more than 20 GB of memory. \n \
The Terabyte dataset can be multiprocessed in an environment \
with more than 24 CPU cores and at least 1 TB of memory.",
)
# inference
parser.add_argument("--inference-only", action="store_true", default=False)
# quantize
parser.add_argument("--quantize-mlp-with-bit", type=int, default=32)
parser.add_argument("--quantize-emb-with-bit", type=int, default=32)
# onnx
parser.add_argument("--save-onnx", action="store_true", default=False)
# gpu
parser.add_argument("--use-gpu", action="store_true", default=False)
# distributed
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--dist-backend", type=str, default="")
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=1)
parser.add_argument("--test-mini-batch-size", type=int, default=-1)
parser.add_argument("--test-num-workers", type=int, default=-1)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--print-wall-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
parser.add_argument("--tensor-board-filename", type=str, default="run_kaggle_pt")
# store/load model
parser.add_argument("--save-model", type=str, default="")
parser.add_argument("--load-model", type=str, default="")
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action="store_true", default=False)
parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False)
# mlperf gradient accumulation iterations
parser.add_argument("--mlperf-grad-accum-iter", type=int, default=1)
# LR policy
parser.add_argument("--lr-num-warmup-steps", type=int, default=0)
parser.add_argument("--lr-decay-start-step", type=int, default=0)
parser.add_argument("--lr-num-decay-steps", type=int, default=0)
global args
global nbatches
global nbatches_test
global writer
args = parser.parse_args()
if args.dataset_multiprocessing:
assert float(sys.version[:3]) > 3.7, "The dataset_multiprocessing " + \
"flag is susceptible to a bug in Python 3.7 and under. " + \
"https://github.com/facebookresearch/dlrm/issues/172"
if args.mlperf_logging:
mlperf_logger.log_event(key=mlperf_logger.constants.CACHE_CLEAR, value=True)
mlperf_logger.log_start(
key=mlperf_logger.constants.INIT_START, log_all_ranks=True
)
if args.weighted_pooling is not None:
if args.qr_flag:
sys.exit("ERROR: quotient remainder with weighted pooling is not supported")
if args.md_flag:
sys.exit("ERROR: mixed dimensions with weighted pooling is not supported")
if args.quantize_emb_with_bit in [4, 8]:
if args.qr_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with quotient remainder is not supported"
)
if args.md_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with mixed dimensions is not supported"
)
if args.use_gpu:
sys.exit(
"ERROR: 4 and 8-bit quantization on GPU is not supported"
)
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
torch.set_printoptions(precision=args.print_precision)
torch.manual_seed(args.numpy_rand_seed)
if args.test_mini_batch_size < 0:
# if the parameter is not set, use the training batch size
args.test_mini_batch_size = args.mini_batch_size
if args.test_num_workers < 0:
# if the parameter is not set, use the same parameter for training
args.test_num_workers = args.num_workers
use_gpu = args.use_gpu and torch.cuda.is_available()
if not args.debug_mode:
ext_dist.init_distributed(local_rank=args.local_rank, use_gpu=use_gpu, backend=args.dist_backend)
if use_gpu:
torch.cuda.manual_seed_all(args.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
if ext_dist.my_size > 1:
ngpus = 1
device = torch.device("cuda", ext_dist.my_local_rank)
else:
ngpus = torch.cuda.device_count()
device = torch.device("cuda", 0)
print("Using {} GPU(s)...".format(ngpus))
else:
device = torch.device("cpu")
print("Using CPU...")
### prepare training data ###
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
# input data
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(key=mlperf_logger.constants.INIT_STOP)
mlperf_logger.barrier()
mlperf_logger.log_start(key=mlperf_logger.constants.RUN_START)
mlperf_logger.barrier()
if args.data_generation == "dataset":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
table_feature_map = {idx: idx for idx in range(len(train_data.counts))}
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
ln_emb = train_data.counts
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(
list(
map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb,
)
)
)
else:
ln_emb = np.array(ln_emb)
m_den = train_data.m_den
ln_bot[0] = m_den
else:
# input and target at random
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
m_den = ln_bot[0]
train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader(args, ln_emb, m_den)
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
args.ln_emb = ln_emb.tolist()
if args.mlperf_logging:
print("command line args: ", json.dumps(vars(args)))
### parse command line arguments ###
m_spa = args.arch_sparse_feature_size
ln_emb = np.asarray(ln_emb)
num_fea = ln_emb.size + 1 # num sparse + num dense features
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ args.arch_interaction_op
+ " is not supported"
)
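# Worked example (illustrative only): with 3 embedding tables and one dense input,
# num_fea = 4; for the default "dot" interaction without self-interaction and
# m_den_out = 2, num_int = 4 * 3 // 2 + 2 = 8, which is prepended below as the
# input dimension of the top mlp.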
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit(
"ERROR: arch-dense-feature-size "
+ str(m_den)
+ " does not match first dim of bottom mlp "
+ str(ln_bot[0])
)
if args.qr_flag:
if args.qr_operation == "concat" and 2 * m_spa != m_den_out:
sys.exit(
"ERROR: 2 arch-sparse-feature-size "
+ str(2 * m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
+ " (note that the last dim of bottom mlp must be 2x the embedding dim)"
)
if args.qr_operation != "concat" and m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
else:
if m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
if num_int != ln_top[0]:
sys.exit(
"ERROR: # of feature interactions "
+ str(num_int)
+ " does not match first dimension of top mlp "
+ str(ln_top[0])
)
# assign mixed dimensions if applicable
if args.md_flag:
m_spa = md_solver(
torch.tensor(ln_emb),
args.md_temperature, # alpha
d0=m_spa,
round_dim=args.md_round_dims,
).tolist()
# test prints (model arch)
if args.debug_mode:
print("model arch:")
print(
"mlp top arch "
+ str(ln_top.size - 1)
+ " layers, with input to output dimensions:"
)
print(ln_top)
print("# of interactions")
print(num_int)
print(
"mlp bot arch "
+ str(ln_bot.size - 1)
+ " layers, with input to output dimensions:"
)
print(ln_bot)
print("# of features (sparse and dense)")
print(num_fea)
print("dense feature size")
print(m_den)
print("sparse feature size")
print(m_spa)
print(
"# of embeddings (= # of sparse features) "
+ str(ln_emb.size)
+ ", with dimensions "
+ str(m_spa)
+ "x:"
)
print(ln_emb)
print("data (inputs and targets):")
for j, inputBatch in enumerate(train_ld):
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch)
torch.set_printoptions(precision=4)
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
print("mini-batch: %d" % j)
print(X.detach().cpu())
# transform offsets to lengths when printing
print(
torch.IntTensor(
[
np.diff(
S_o.detach().cpu().tolist() + list(lS_i[i].shape)
).tolist()
for i, S_o in enumerate(lS_o)
]
)
)
print([S_i.detach().cpu() for S_i in lS_i])
print(T.detach().cpu())
global ndevices
ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1
### construct the neural network specified above ###
# WARNING: to obtain exactly the same initialization for
# the weights we need to start from the same random seed.
# np.random.seed(args.numpy_rand_seed)
global dlrm
dlrm = DLRM_Net(
m_spa,
ln_emb,
ln_bot,
ln_top,
arch_interaction_op=args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=args.sync_dense_params,
loss_threshold=args.loss_threshold,
ndevices=ndevices,
qr_flag=args.qr_flag,
qr_operation=args.qr_operation,
qr_collisions=args.qr_collisions,
qr_threshold=args.qr_threshold,
md_flag=args.md_flag,
md_threshold=args.md_threshold,
weighted_pooling=args.weighted_pooling,
loss_function=args.loss_function
)
# test prints
if args.debug_mode:
print("initial parameters (weights and bias):")
for param in dlrm.parameters():
print(param.detach().cpu().numpy())
# print(dlrm)
if use_gpu:
# Custom Model-Data Parallel
# the mlps are replicated and use data parallelism, while
# the embeddings are distributed and use model parallelism
dlrm = dlrm.to(device) # .cuda()
if dlrm.ndevices > 1:
dlrm.emb_l, dlrm.v_W_l = dlrm.create_emb(
m_spa, ln_emb, args.weighted_pooling
)
else:
if dlrm.weighted_pooling == "fixed":
for k, w in enumerate(dlrm.v_W_l):
dlrm.v_W_l[k] = w.cuda()
# distribute data parallel mlps
if ext_dist.my_size > 1:
if use_gpu:
device_ids = [ext_dist.my_local_rank]
dlrm.bot_l = ext_dist.DDP(dlrm.bot_l, device_ids=device_ids)
dlrm.top_l = ext_dist.DDP(dlrm.top_l, device_ids=device_ids)
else:
dlrm.bot_l = ext_dist.DDP(dlrm.bot_l)
dlrm.top_l = ext_dist.DDP(dlrm.top_l)
if not args.inference_only:
#if use_gpu and args.optimizer in ["rwsadagrad", "adagrad"]:
#sys.exit("GPU version of Adagrad is not supported by PyTorch.")
# specify the optimizer algorithm
opts = {
"sgd": torch.optim.SGD,
"rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad,
"adagrad": torch.optim.Adagrad,
"RMSprop": torch.optim.RMSprop,
"Adadelta" : torch.optim.Adadelta,
"Adam" :torch.optim.Adam,
# "AdaMax" : torch.optim.Adamax(parameters, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0),
# "ASGD" : torch.optim.ASGD(parameters, lr=0.01, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0)
}
parameters = (
dlrm.parameters()
if ext_dist.my_size == 1
else [
{
"params": [p for emb in dlrm.emb_l for p in emb.parameters()],
"lr": args.learning_rate,
},
# TODO check this lr setup
# bottom mlp has no data parallelism
# need to check how do we deal with top mlp
{
"params": dlrm.bot_l.parameters(),
"lr": args.learning_rate,
},
{
"params": dlrm.top_l.parameters(),
"lr": args.learning_rate,
},
]
)
if args.optimizer in ["rwsadagrad", "adagrad"]:
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate)
elif args.optimizer == "RMSprop":
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)
elif args.optimizer == "Adam":
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate, betas=(0.9, 0.999), eps=1e-08)
elif args.optimizer == "Adadelta":
optimizer = opts[args.optimizer](parameters, lr=args.learning_rate, rho=0.9, eps=1e-06, weight_decay=0)
lr_scheduler = LRPolicyScheduler(
optimizer,
args.lr_num_warmup_steps,
args.lr_decay_start_step,
args.lr_num_decay_steps,
)
### main loop ###
# training or inference
best_acc_test = 0
best_auc_test = 0
skip_upto_epoch = 0
skip_upto_batch = 0
total_time = 0
total_loss = 0
total_iter = 0
total_samp = 0
if args.mlperf_logging:
mlperf_logger.mlperf_submission_log("dlrm")
mlperf_logger.log_event(
key=mlperf_logger.constants.SEED, value=args.numpy_rand_seed
)
mlperf_logger.log_event(
key=mlperf_logger.constants.GLOBAL_BATCH_SIZE, value=args.mini_batch_size
)
# Load model if specified
if not (args.load_model == ""):
print("Loading saved model {}".format(args.load_model))
if use_gpu:
if dlrm.ndevices > 1:
# NOTE: when targeting inference on multiple GPUs,
# load the model as is on CPU or GPU, with the move
# to multiple GPUs to be done in parallel_forward
ld_model = torch.load(args.load_model)
else:
# NOTE: when targeting inference on single GPU,
# note that the call to .to(device) has already happened
ld_model = torch.load(
args.load_model,
map_location=torch.device("cuda")
# map_location=lambda storage, loc: storage.cuda(0)
)
else:
# when targeting inference on CPU
ld_model = torch.load(args.load_model, map_location=torch.device("cpu"))
dlrm.load_state_dict(ld_model["state_dict"])
ld_j = ld_model["iter"]
ld_k = ld_model["epoch"]
ld_nepochs = ld_model["nepochs"]
ld_nbatches = ld_model["nbatches"]
ld_nbatches_test = ld_model["nbatches_test"]
ld_train_loss = ld_model["train_loss"]
ld_total_loss = ld_model["total_loss"]
if args.mlperf_logging:
ld_gAUC_test = ld_model["test_auc"]
ld_acc_test = ld_model["test_acc"]
if not args.inference_only:
optimizer.load_state_dict(ld_model["opt_state_dict"])
best_acc_test = ld_acc_test
total_loss = ld_total_loss
skip_upto_epoch = ld_k # epochs
skip_upto_batch = ld_j # batches
else:
args.print_freq = ld_nbatches
args.test_freq = 0
print(
"Saved at: epoch = {:d}/{:d}, batch = {:d}/{:d}, ntbatch = {:d}".format(
ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test
)
)
print(
"Training state: loss = {:.6f}".format(
ld_train_loss,
)
)
if args.mlperf_logging:
print(
"Testing state: accuracy = {:3.3f} %, auc = {:.3f}".format(
ld_acc_test * 100, ld_gAUC_test
)
)
else:
print("Testing state: accuracy = {:3.3f} %".format(ld_acc_test * 100))
if args.inference_only:
# Currently, dynamic quantization supports only INT8 and FP16 weights for MLPs,
# and INT4 and INT8 weights for EmbeddingBag post-training quantization
# during inference.
# By default we don't do the quantization: quantize_{mlp,emb}_with_bit == 32 (FP32)
assert args.quantize_mlp_with_bit in [
8,
16,
32,
], "only support 8/16/32-bit but got {}".format(args.quantize_mlp_with_bit)
assert args.quantize_emb_with_bit in [
4,
8,
32,
], "only support 4/8/32-bit but got {}".format(args.quantize_emb_with_bit)
if args.quantize_mlp_with_bit != 32:
if args.quantize_mlp_with_bit in [8]:
quantize_dtype = torch.qint8
else:
quantize_dtype = torch.float16
dlrm = torch.quantization.quantize_dynamic(
dlrm, {torch.nn.Linear}, quantize_dtype
)
if args.quantize_emb_with_bit != 32:
dlrm.quantize_embedding(args.quantize_emb_with_bit)
# print(dlrm)
print("time/loss/accuracy (if enabled):")
if args.mlperf_logging:
# LR is logged twice for now because of a compliance checker bug
mlperf_logger.log_event(
key=mlperf_logger.constants.OPT_BASE_LR, value=args.learning_rate
)
mlperf_logger.log_event(
key=mlperf_logger.constants.OPT_LR_WARMUP_STEPS,
value=args.lr_num_warmup_steps,
)
# use logging keys from the official HP table and not from the logging library
mlperf_logger.log_event(
key="sgd_opt_base_learning_rate", value=args.learning_rate
)
mlperf_logger.log_event(
key="lr_decay_start_steps", value=args.lr_decay_start_step
)
mlperf_logger.log_event(
key="sgd_opt_learning_rate_decay_steps", value=args.lr_num_decay_steps
)
mlperf_logger.log_event(key="sgd_opt_learning_rate_decay_poly_power", value=2)
tb_file = "./" + args.tensor_board_filename
writer = SummaryWriter(tb_file)
ext_dist.barrier()
with torch.autograd.profiler.profile(
args.enable_profiling, use_cuda=use_gpu, record_shapes=True
) as prof:
if not args.inference_only:
k = 0
total_time_begin = 0
while k < args.nepochs:
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_start(
key=mlperf_logger.constants.BLOCK_START,
metadata={
mlperf_logger.constants.FIRST_EPOCH_NUM: (k + 1),
mlperf_logger.constants.EPOCH_COUNT: 1,
},
)
mlperf_logger.barrier()
mlperf_logger.log_start(
key=mlperf_logger.constants.EPOCH_START,
metadata={mlperf_logger.constants.EPOCH_NUM: (k + 1)},
)
if k < skip_upto_epoch:
continue
if args.mlperf_logging:
previous_iteration_time = None
for j, inputBatch in enumerate(train_ld):
if j == 0 and args.save_onnx:
X_onnx, lS_o_onnx, lS_i_onnx, _, _, _ = unpack_batch(inputBatch)
if j < skip_upto_batch:
continue
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch)
if args.mlperf_logging:
current_time = time_wrap(use_gpu)
if previous_iteration_time:
iteration_time = current_time - previous_iteration_time
else:
iteration_time = 0
previous_iteration_time = current_time
else:
t1 = time_wrap(use_gpu)
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
# Skip the batch if batch size not multiple of total ranks
if ext_dist.my_size > 1 and X.size(0) % ext_dist.my_size != 0:
print(
"Warning: Skiping the batch %d with size %d"
% (j, X.size(0))
)
continue
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# forward pass
Z = dlrm_wrap(
X,
lS_o,
lS_i,
use_gpu,
device,
ndevices=ndevices,
)
if ext_dist.my_size > 1:
T = T[ext_dist.get_my_slice(mbs)]
W = W[ext_dist.get_my_slice(mbs)]
# loss
E = loss_fn_wrap(Z, T, use_gpu, device)
# compute loss and accuracy
L = E.detach().cpu().numpy() # numpy array
# training accuracy computation is disabled below (uncomment to re-enable)
# S = Z.detach().cpu().numpy() # numpy array
# T = T.detach().cpu().numpy() # numpy array
# # print("res: ", S)
# # print("j, train: BCE ", j, L)
# mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# A = np.sum((np.round(S, 0) == T).astype(np.uint8))
with record_function("DLRM backward"):
# scaled error gradient propagation
# (where we do not accumulate gradients across mini-batches)
if (args.mlperf_logging and (j + 1) % args.mlperf_grad_accum_iter == 0) or not args.mlperf_logging:
optimizer.zero_grad()
# backward pass
E.backward()
# optimizer
if (args.mlperf_logging and (j + 1) % args.mlperf_grad_accum_iter == 0) or not args.mlperf_logging:
optimizer.step()
lr_scheduler.step()
if args.mlperf_logging:
total_time += iteration_time
else:
t2 = time_wrap(use_gpu)
total_time += t2 - t1
total_loss += L * mbs
total_iter += 1
total_samp += mbs
should_print = ((j + 1) % args.print_freq == 0) or (
j + 1 == nbatches
)
should_test = (
(args.test_freq > 0)
and (args.data_generation in ["dataset", "random"])
and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches))
)
# print time, loss and accuracy
if should_print or should_test:
gT = 1000.0 * total_time / total_iter if args.print_time else -1
total_time = 0
train_loss = total_loss / total_samp
total_loss = 0
str_run_type = (
"inference" if args.inference_only else "training"
)
wall_time = ""
if args.print_wall_time:
wall_time = " ({})".format(time.strftime("%H:%M"))
print(
"Finished {} it {}/{} of epoch {}, {:.2f} ms/it,".format(
str_run_type, j + 1, nbatches, k, gT
)
+ " loss {:.6f}".format(train_loss)
+ wall_time,
flush=True,
)
log_iter = nbatches * k + j + 1
writer.add_scalar("Train/Loss", train_loss, log_iter)
total_iter = 0
total_samp = 0
# testing
if should_test:
epoch_num_float = (j + 1) / len(train_ld) + k + 1
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_start(
key=mlperf_logger.constants.EVAL_START,
metadata={
mlperf_logger.constants.EPOCH_NUM: epoch_num_float
},
)
# don't measure training iter time in a test iteration
if args.mlperf_logging:
previous_iteration_time = None
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k)
)
model_metrics_dict, is_best = inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
log_iter,
)
if (
is_best
and not (args.save_model == "")
and not args.inference_only
):
model_metrics_dict["epoch"] = k
model_metrics_dict["iter"] = j + 1
model_metrics_dict["train_loss"] = train_loss
model_metrics_dict["total_loss"] = total_loss
model_metrics_dict[
"opt_state_dict"
] = optimizer.state_dict()
print("Saving model to {}".format(args.save_model))
torch.save(model_metrics_dict, args.save_model)
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.EVAL_STOP,
metadata={
mlperf_logger.constants.EPOCH_NUM: epoch_num_float
},
)
# Uncomment the line below to print out the total time with overhead
# print("Total test time for this group: {}" \
# .format(time_wrap(use_gpu) - accum_test_time_begin))
if (
args.mlperf_logging
and (args.mlperf_acc_threshold > 0)
and (best_acc_test > args.mlperf_acc_threshold)
):
print(
"MLPerf testing accuracy threshold "
+ str(args.mlperf_acc_threshold)
+ " reached, stop training"
)
break
if (
args.mlperf_logging
and (args.mlperf_auc_threshold > 0)
and (best_auc_test > args.mlperf_auc_threshold)
):
print(
"MLPerf testing auc threshold "
+ str(args.mlperf_auc_threshold)
+ " reached, stop training"
)
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.RUN_STOP,
metadata={
mlperf_logger.constants.STATUS: mlperf_logger.constants.SUCCESS
},
)
break
if args.mlperf_logging:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.EPOCH_STOP,
metadata={mlperf_logger.constants.EPOCH_NUM: (k + 1)},
)
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.BLOCK_STOP,
metadata={mlperf_logger.constants.FIRST_EPOCH_NUM: (k + 1)},
)
k += 1 # nepochs
if args.mlperf_logging and best_auc_test <= args.mlperf_auc_threshold:
mlperf_logger.barrier()
mlperf_logger.log_end(
key=mlperf_logger.constants.RUN_STOP,
metadata={
mlperf_logger.constants.STATUS: mlperf_logger.constants.ABORTED
},
)
else:
print("Testing for inference only")
inference(
args,
dlrm,
best_acc_test,
best_auc_test,
test_ld,
device,
use_gpu,
)
# profiling
if args.enable_profiling:
time_stamp = str(datetime.datetime.now()).replace(" ", "_")
with open("dlrm_s_pytorch" + time_stamp + "_shape.prof", "w") as prof_f:
prof_f.write(
prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total"
)
)
with open("dlrm_s_pytorch" + time_stamp + "_total.prof", "w") as prof_f:
prof_f.write(prof.key_averages().table(sort_by="self_cpu_time_total"))
prof.export_chrome_trace("dlrm_s_pytorch" + time_stamp + ".json")
# print(prof.key_averages().table(sort_by="cpu_time_total"))
# plot compute graph
if args.plot_compute_graph:
sys.exit(
"ERROR: Please install pytorchviz package in order to use the"
+ " visualization. Then, uncomment its import above as well as"
+ " three lines below and run the code again."
)
# V = Z.mean() if args.inference_only else E
# dot = make_dot(V, params=dict(dlrm.named_parameters()))
# dot.render('dlrm_s_pytorch_graph') # write .pdf file
# test prints
if not args.inference_only and args.debug_mode:
print("updated parameters (weights and bias):")
for param in dlrm.parameters():
print(param.detach().cpu().numpy())
# export the model in onnx
if args.save_onnx:
"""
# workaround 1: tensor -> list
if torch.is_tensor(lS_i_onnx):
lS_i_onnx = [lS_i_onnx[j] for j in range(len(lS_i_onnx))]
# workaround 2: list -> tensor
lS_i_onnx = torch.stack(lS_i_onnx)
"""
# debug prints
# print("inputs", X_onnx, lS_o_onnx, lS_i_onnx)
# print("output", dlrm_wrap(X_onnx, lS_o_onnx, lS_i_onnx, use_gpu, device))
dlrm_pytorch_onnx_file = "dlrm_s_pytorch.onnx"
batch_size = X_onnx.shape[0]
print("X_onnx.shape", X_onnx.shape)
if torch.is_tensor(lS_o_onnx):
print("lS_o_onnx.shape", lS_o_onnx.shape)
else:
for oo in lS_o_onnx:
print("oo.shape", oo.shape)
if torch.is_tensor(lS_i_onnx):
print("lS_i_onnx.shape", lS_i_onnx.shape)
else:
for ii in lS_i_onnx:
print("ii.shape", ii.shape)
# name inputs and outputs
o_inputs = (
["offsets"]
if torch.is_tensor(lS_o_onnx)
else ["offsets_" + str(i) for i in range(len(lS_o_onnx))]
)
i_inputs = (
["indices"]
if torch.is_tensor(lS_i_onnx)
else ["indices_" + str(i) for i in range(len(lS_i_onnx))]
)
all_inputs = ["dense_x"] + o_inputs + i_inputs
# debug prints
print("inputs", all_inputs)
# create dynamic_axis dictionaries
do_inputs = (
[{"offsets": {1: "batch_size"}}]
if torch.is_tensor(lS_o_onnx)
else [
{"offsets_" + str(i): {0: "batch_size"}} for i in range(len(lS_o_onnx))
]
)
di_inputs = (
[{"indices": {1: "batch_size"}}]
if torch.is_tensor(lS_i_onnx)
else [
{"indices_" + str(i): {0: "batch_size"}} for i in range(len(lS_i_onnx))
]
)
dynamic_axes = {"dense_x": {0: "batch_size"}, "pred": {0: "batch_size"}}
for do in do_inputs:
dynamic_axes.update(do)
for di in di_inputs:
dynamic_axes.update(di)
# debug prints
print(dynamic_axes)
# export model
torch.onnx.export(
dlrm,
(X_onnx, lS_o_onnx, lS_i_onnx),
dlrm_pytorch_onnx_file,
verbose=True,
use_external_data_format=True,
opset_version=11,
input_names=all_inputs,
output_names=["pred"],
dynamic_axes=dynamic_axes,
)
# recover the model back
dlrm_pytorch_onnx = onnx.load("dlrm_s_pytorch.onnx")
# check the onnx model
onnx.checker.check_model(dlrm_pytorch_onnx)
total_time_end = time_wrap(use_gpu)
if __name__ == "__main__":
run()
|
py | 1a59d725cd7587b8c289b4fbd634c0db7e6741f5 |
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import copy
import datetime
from dateutil.tz import tzlocal
import time
from policyuniverse import expand_policy, get_actions_from_statement, all_permissions
from repokid.utils.dynamo import (add_to_end_of_list, get_role_data, role_ids_for_account, set_role_data,
store_initial_role_data)
from repokid import CONFIG as CONFIG
from repokid import LOGGER as LOGGER
import repokid.hooks
from repokid.role import Role
IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = frozenset(['lightsail', 'organizations', 'tag'])
IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = frozenset(['iam:passrole'])
# permission decisions have the form repoable - boolean, and decider - string
class RepoablePermissionDecision(object):
def __init__(self):
self.repoable = None
self.decider = ''
def __repr__(self):
return('Is repoable: {}, Decider: {}'.format(self.repoable, self.decider))
def add_new_policy_version(dynamo_table, role, current_policy, update_source):
"""
Create a new entry in the history of policy versions in Dynamo. The entry contains the source of the new policy
(Scan, Repo, or Restore), the current time, and the current policy contents. Updates the role's policies with the
full policy history, including the latest version.
Args:
role (Role)
current_policy (dict)
update_source (string): ['Repo', 'Scan', 'Restore']
Returns:
None
"""
policy_entry = {'Source': update_source, 'Discovered': datetime.datetime.utcnow().isoformat(),
'Policy': current_policy}
add_to_end_of_list(dynamo_table, role.role_id, 'Policies', policy_entry)
role.policies = get_role_data(dynamo_table, role.role_id, fields=['Policies'])['Policies']
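# Illustrative sketch (not part of the original module): the shape of one entry appended
# to a role's 'Policies' history by add_new_policy_version. The inline policy body here
# is made up; real entries carry the full policy dict returned by IAM.
def _example_policy_entry():
    return {
        'Source': 'Scan',
        'Discovered': datetime.datetime.utcnow().isoformat(),
        'Policy': {'example_inline_policy': {'Statement': []}},
    }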
def find_and_mark_inactive(dynamo_table, account_number, active_roles):
"""
Mark roles in the account that aren't currently active as inactive. Do this by getting all roles in the account and
subtracting the active roles; any that are left are inactive and should be marked as such.
Args:
account_number (string)
active_roles (set): the currently active roles discovered in the most recent scan
Returns:
None
"""
active_roles = set(active_roles)
known_roles = set(role_ids_for_account(dynamo_table, account_number))
inactive_roles = known_roles - active_roles
for roleID in inactive_roles:
role_dict = get_role_data(dynamo_table, roleID, fields=['Active', 'Arn'])
if role_dict.get('Active'):
set_role_data(dynamo_table, roleID, {'Active': False})
def find_newly_added_permissions(old_policy, new_policy):
"""
Compare an old version of policies to a new version and return a set of permissions that were added. This will
be used to maintain a list of permissions that were newly added and should not be repoed for a period of time.
Args:
old_policy
new_policy
Returns:
set: Expanded set of permissions that are in the new policy and not the old one
"""
old_permissions = _get_role_permissions(Role({'Policies': [{'Policy': old_policy}]}))
new_permissions = _get_role_permissions(Role({'Policies': [{'Policy': new_policy}]}))
return new_permissions - old_permissions
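# Illustrative sketch (not part of the original module): the set arithmetic used above,
# shown with permission names that are already expanded (the expansion itself is done
# by policyuniverse inside _get_role_permissions).
def _example_newly_added_permissions():
    old_permissions = {'s3:getobject'}
    new_permissions = {'s3:getobject', 's3:putobject'}
    # only the permission missing from the old policy is reported as newly added
    return new_permissions - old_permissions  # {'s3:putobject'}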
def update_no_repo_permissions(dynamo_table, role, newly_added_permissions):
"""
Update the Dynamo entry for newly added permissions. Any that were newly detected get added with an expiration
date of now plus the config setting for 'repo_requirements': 'exclude_new_permissions_for_days'. Expired entries
get deleted. Also update the role object with the new no-repo-permissions.
Args:
role
newly_added_permissions (set)
Returns:
None
"""
current_ignored_permissions = get_role_data(
dynamo_table, role.role_id, fields=['NoRepoPermissions']).get('NoRepoPermissions', {})
new_ignored_permissions = {}
current_time = int(time.time())
new_perms_expire_time = current_time + (
24 * 60 * 60 * CONFIG['repo_requirements'].get('exclude_new_permissions_for_days', 14))
# only copy non-expired items to the new dictionary
for permission, expire_time in current_ignored_permissions.items():
if expire_time > current_time:
new_ignored_permissions[permission] = current_ignored_permissions[permission]
for permission in newly_added_permissions:
new_ignored_permissions[permission] = new_perms_expire_time
role.no_repo_permissions = new_ignored_permissions
set_role_data(dynamo_table, role.role_id, {'NoRepoPermissions': role.no_repo_permissions})
def update_opt_out(dynamo_table, role):
"""
Update opt-out object for a role - remove (set to empty dict) any entries that have expired
Opt-out objects should have the form {'expire': xxx, 'owner': xxx, 'reason': xxx}
Args:
role
Returns:
None
"""
if role.opt_out and int(role.opt_out['expire']) < int(time.time()):
set_role_data(dynamo_table, role.role_id, {'OptOut': {}})
def update_role_data(dynamo_table, account_number, role, current_policy, source='Scan', add_no_repo=True):
"""
Compare the current version of a policy for a role and what has been previously stored in Dynamo.
- If the current and new policy versions are different, store the new version in Dynamo. Add any newly added
permissions to the temporary permission blacklist. Purge any expired entries from the permission blacklist.
- Refresh the updated time on the role policy
- If the role is completely new, store the first version in Dynamo
- Updates the role with full history of policies, including current version
Args:
dynamo_table
account_number
role (Role): current role being updated
current_policy (dict): representation of the current policy version
source: Default 'Scan' but could be Repo, Rollback, etc
Returns:
None
"""
# policy_entry: source, discovered, policy
stored_role = get_role_data(dynamo_table, role.role_id, fields=['OptOut', 'Policies'])
if not stored_role:
role_dict = store_initial_role_data(dynamo_table, role.arn, role.create_date, role.role_id, role.role_name,
account_number, current_policy)
role.set_attributes(role_dict)
LOGGER.info('Added new role ({}): {}'.format(role.role_id, role.arn))
else:
# is the policy list the same as the last we had?
old_policy = stored_role['Policies'][-1]['Policy']
if current_policy != old_policy:
add_new_policy_version(dynamo_table, role, current_policy, source)
LOGGER.info('{} has different inline policies than last time, adding to role store'.format(role.arn))
newly_added_permissions = find_newly_added_permissions(old_policy, current_policy)
else:
newly_added_permissions = set()
if add_no_repo:
update_no_repo_permissions(dynamo_table, role, newly_added_permissions)
update_opt_out(dynamo_table, role)
set_role_data(dynamo_table, role.role_id, {'Refreshed': datetime.datetime.utcnow().isoformat()})
# Update all data from Dynamo except CreateDate (it's in the wrong format) and DQ_by (we're going to recalc)
current_role_data = get_role_data(dynamo_table, role.role_id)
current_role_data.pop('CreateDate', None)
current_role_data.pop('DisqualifiedBy', None)
role.set_attributes(current_role_data)
def update_stats(dynamo_table, roles, source='Scan'):
"""
Create a new stats entry for each role in a set of roles and add it to Dynamo
Args:
roles (Roles): a list of all the role objects to update data for
source (string): the source of the new stats data (repo, scan, etc)
Returns:
None
"""
for role in roles:
new_stats = {'Date': datetime.datetime.utcnow().isoformat(),
'DisqualifiedBy': role.disqualified_by,
'PermissionsCount': role.total_permissions,
'RepoablePermissionsCount': role.repoable_permissions,
'Source': source}
try:
cur_stats = role.stats[-1]
except IndexError:
cur_stats = {'DisqualifiedBy': [], 'PermissionsCount': 0, 'RepoablePermissionsCount': 0}
for item in ['DisqualifiedBy', 'PermissionsCount', 'RepoablePermissionsCount']:
if new_stats.get(item) != cur_stats.get(item):
add_to_end_of_list(dynamo_table, role.role_id, 'Stats', new_stats)
def _calculate_repo_scores(roles, minimum_age, hooks):
"""
Get the total and repoable permissions count and set of repoable services for every role in the account.
For each role:
1) call _get_role_permissions
2) call _get_repoable_permissions (count), repoable_permissions (count), and repoable_services (list) for role
Each time we got the role permissions we built a list of any permissions that the role's policies granted access
to but weren't in our master list of permissions AWS has. At the end of this run we'll warn about any of these.
Args:
roles (Roles): The set of all roles we're analyzing
minimum_age
hooks
Returns:
None
"""
for role in roles:
permissions = _get_role_permissions(role)
role.total_permissions = len(permissions)
# if we don't have any access advisor data for a service then nothing is repoable
if not role.aa_data:
LOGGER.info('No data found in access advisor for {}'.format(role.role_id))
role.repoable_permissions = 0
role.repoable_services = []
continue
# permissions are only repoable if the role isn't being disqualified by filter(s)
if len(role.disqualified_by) == 0:
repoable_permissions = _get_repoable_permissions(role.account, role.role_name, permissions, role.aa_data,
role.no_repo_permissions, minimum_age, hooks)
(repoable_permissions_set, repoable_services_set) = _convert_repoable_perms_to_perms_and_services(
permissions, repoable_permissions)
role.repoable_permissions = len(repoable_permissions)
# we're going to store both repoable permissions and repoable services in the field "RepoableServices"
role.repoable_services = repoable_services_set + repoable_permissions_set
else:
role.repoable_permissions = 0
role.repoable_services = []
def _convert_repoable_perms_to_perms_and_services(total_permissions, repoable_permissions):
"""
Take a list of total permissions and repoable permissions and determine whether only a few permissions are being
repoed or if the entire service (all permissions from that service) are being removed.
Args:
total_permissions (list): A list of the total permissions a role has
repoable_permissions (list): A list of repoable permissions suggested to be removed
Returns:
list: Sorted list of permissions that will be individually removed but other permissions from the service will
be kept
list: Sorted list of services that will be completely removed
"""
repoed_permissions = set()
repoed_services = set()
total_perms_by_service = defaultdict(list)
repoable_perms_by_service = defaultdict(list)
# group total permissions and repoable permissions by service
for perm in total_permissions:
total_perms_by_service[perm.split(':')[0]].append(perm)
for perm in repoable_permissions:
repoable_perms_by_service[perm.split(':')[0]].append(perm)
for service in repoable_perms_by_service:
if all(perm in repoable_perms_by_service[service] for perm in total_perms_by_service[service]):
repoed_services.add(service)
else:
repoed_permissions.update(perm for perm in repoable_perms_by_service[service])
return (sorted(repoed_permissions), sorted(repoed_services))
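# Illustrative sketch (not part of the original module): with made-up permissions, a
# service whose permissions are all repoable is reported as a whole service, while a
# partially repoable service is reported permission by permission.
def _example_convert_repoable_perms():
    total = ['s3:getobject', 's3:putobject', 'ec2:describeinstances']
    repoable = ['s3:getobject', 'ec2:describeinstances']
    perms, services = _convert_repoable_perms_to_perms_and_services(total, repoable)
    # perms == ['s3:getobject'], services == ['ec2']
    return perms, services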
def _convert_repoed_service_to_sorted_perms_and_services(repoed_services):
"""
Repokid stores a field RepoableServices that historically only stored services (when Access Advisor was only data).
Now this field is repurposed to store both services and permissions. We can tell the difference because permissions
always have the form <service>:<permission>. This function splits the contents of the field to sorted sets of
repoable services and permissions.
Args:
repoed_services (list): List from Dynamo of repoable services and permissions
Returns:
list: Sorted list of repoable permissions (where there are other permissions that aren't repoed)
list: Sorted list of repoable services (where the entire service is removed)
"""
repoable_permissions = set()
repoable_services = set()
for entry in repoed_services:
if len(entry.split(':')) == 2:
repoable_permissions.add(entry)
else:
repoable_services.add(entry)
return (sorted(repoable_permissions), sorted(repoable_services))
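# Illustrative sketch (not part of the original module): splitting a mixed
# RepoableServices field (entries with a colon are permissions, bare entries are
# whole services) back into the two sorted lists.
def _example_convert_repoed_service():
    stored = ['ec2', 's3:getobject', 'iam']
    perms, services = _convert_repoed_service_to_sorted_perms_and_services(stored)
    # perms == ['s3:getobject'], services == ['ec2', 'iam']
    return perms, services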
def _get_repoable_permissions(account_number, role_name, permissions, aa_data, no_repo_permissions, minimum_age,
hooks):
"""
Generate a list of repoable permissions for a role based on the list of all permissions the role's policies
currently allow and Access Advisor data for the services included in the role's policies.
The first step is to come up with a list of services that were used within the time threshold (the same threshold
defined in the age filter config). Permissions are repoable if they aren't in the used list, aren't in the constant list
of unsupported services/actions (IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES, IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS),
and aren't being temporarily ignored because they're on the no_repo_permissions list (newly added).
Args:
account_number
role_name
permissions (list): The full list of permissions that the role's policies allow
aa_data (list): A list of Access Advisor data for a role. Each element is a dictionary with a couple of required
attributes: lastAuthenticated (epoch time in milliseconds when the service was last used) and
serviceNamespace (the service used)
no_repo_permissions (dict): Keys are the name of permissions and values are the time the entry expires
minimum_age: Minimum age of a role (in days) for it to be repoable
hooks: Dict containing hook names and functions to run
Returns:
set: Permissions that are 'repoable' (not used within the time threshold)
"""
ago = datetime.timedelta(minimum_age)
now = datetime.datetime.now(tzlocal())
current_time = time.time()
no_repo_list = [perm.lower() for perm in no_repo_permissions if no_repo_permissions[perm] > current_time]
# cast all permissions to lowercase
permissions = [permission.lower() for permission in permissions]
potentially_repoable_permissions = {permission: RepoablePermissionDecision()
for permission in permissions if permission not in no_repo_list}
used_services = set()
for service in aa_data:
accessed = service['lastAuthenticated']
if not accessed:
continue
accessed = datetime.datetime.fromtimestamp(accessed / 1000, tzlocal())
if accessed > now - ago:
used_services.add(service['serviceNamespace'])
for permission_name, permission_decision in potentially_repoable_permissions.items():
if permission_name.split(':')[0] in IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES:
LOGGER.warn('skipping {}'.format(permission_name))
continue
# we have an unused service but need to make sure it's repoable
if permission_name.split(':')[0] not in used_services:
if permission_name in IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS:
LOGGER.warn('skipping {}'.format(permission_name))
continue
permission_decision.repoable = True
permission_decision.decider = 'Access Advisor'
hooks_output = repokid.hooks.call_hooks(hooks, 'DURING_REPOABLE_CALCULATION',
{'account_number': account_number,
'role_name': role_name,
'potentially_repoable_permissions': potentially_repoable_permissions,
'minimum_age': minimum_age})
# TODO: make option to show source of repoable?
return set([permission_name for permission_name, permission_value in
hooks_output['potentially_repoable_permissions'].items() if permission_value.repoable])
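# Illustrative note (field shapes are assumptions based on the docstring above): a
# minimal Access Advisor entry looks roughly like
#   {'serviceNamespace': 's3', 'lastAuthenticated': 1514764800000}
# with the timestamp in epoch milliseconds. A permission such as 'sqs:deletequeue' is
# marked repoable by 'Access Advisor' only if sqs is absent from the recently used
# services, the permission is not in the unsupported lists, and it is not time-boxed
# in no_repo_permissions; the DURING_REPOABLE_CALCULATION hooks can still override
# that decision before the final set is returned.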
def _get_repoed_policy(policies, repoable_permissions):
"""
This function contains the logic to rewrite the policy to remove any repoable permissions. To do so we:
- Iterate over role policies
- Iterate over policy statements
- Skip Deny statements
- Remove any actions that are in repoable_permissions
- Remove any statements that now have zero actions
- Remove any policies that now have zero statements
Args:
policies (dict): All of the inline policies as a dict with name and policy contents
repoable_permissions (set): A set of all of the repoable permissions for policies
Returns:
dict: The rewritten set of all inline policies
list: Any policies that are now empty as a result of the rewrites
"""
# work with our own copy; don't mess with the CACHE copy.
role_policies = copy.deepcopy(policies)
empty_policies = []
for policy_name, policy in role_policies.items():
# list of indexes in the policy that are empty
empty_statements = []
if type(policy['Statement']) is dict:
policy['Statement'] = [policy['Statement']]
for idx, statement in enumerate(policy['Statement']):
if statement['Effect'].lower() == 'allow':
statement_actions = get_actions_from_statement(statement)
statement_actions = statement_actions.difference(repoable_permissions)
# get_actions_from_statement has already inverted this so our new statement should be 'Action'
if 'NotAction' in statement:
del statement['NotAction']
# by putting this into a set, we lose order, which may be confusing to someone.
statement['Action'] = sorted(list(statement_actions))
# mark empty statements to be removed
if len(statement['Action']) == 0:
empty_statements.append(idx)
# do the actual removal of empty statements
for idx in sorted(empty_statements, reverse=True):
del policy['Statement'][idx]
# mark empty policies to be removed
if len(policy['Statement']) == 0:
empty_policies.append(policy_name)
# do the actual removal of empty policies.
for policy_name in empty_policies:
del role_policies[policy_name]
return role_policies, empty_policies
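# Illustrative note (assumes policyuniverse normalizes actions to lowercase): given a
# single inline policy
#   {'p1': {'Statement': [{'Effect': 'Allow', 'Action': ['s3:GetObject', 's3:PutObject']}]}}
# and repoable_permissions == {'s3:putobject'}, the rewritten statement keeps only
# ['s3:getobject']; if every action in a statement were repoable the statement would be
# dropped, and a policy left with no statements would come back in empty_policies.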
def _get_permissions_in_policy(policy_dict, warn_unknown_perms=False):
"""
Given a set of policies for a role, return a set of all allowed permissions
Args:
policy_dict
warn_unknown_perms
Returns:
set: all permissions allowed by the policies
"""
permissions = set()
for policy_name, policy in policy_dict.items():
policy = expand_policy(policy=policy, expand_deny=False)
for statement in policy.get('Statement'):
if statement['Effect'].lower() == 'allow':
permissions = permissions.union(get_actions_from_statement(statement))
weird_permissions = permissions.difference(all_permissions)
if weird_permissions and warn_unknown_perms:
LOGGER.warn('Unknown permissions found: {}'.format(weird_permissions))
return permissions
def _get_role_permissions(role, warn_unknown_perms=False):
"""
Expand the most recent version of policies from a role to produce a list of all the permissions that are allowed
(permission is included in one or more statements that is allowed). To perform expansion the policyuniverse
library is used. The result is a list of all of the individual permissions that are allowed in any of the
statements. If our resultant list contains any permissions that aren't listed in the master list of permissions,
we'll log a warning with the set of unknown permissions found.
Args:
role (Role): The role object that we're getting a list of permissions for
Returns:
set: A set of permissions that the role has policies that allow
"""
return _get_permissions_in_policy(role.policies[-1]['Policy'], warn_unknown_perms=warn_unknown_perms)
def _get_services_in_permissions(permissions_set):
"""
Given a set of permissions, return a sorted set of services
Args:
permissions_set
Returns:
services_set
"""
services_set = set()
for permission in permissions_set:
try:
service = permission.split(':')[0]
except IndexError:
pass
else:
services_set.add(service)
return sorted(services_set)
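# Illustrative sketch (not part of the original module): extracting the service prefix
# from a handful of made-up permissions.
def _example_services_in_permissions():
    services = _get_services_in_permissions({'s3:getobject', 's3:putobject', 'ec2:runinstances'})
    # services == ['ec2', 's3']
    return services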
|
py
|
1a59d7d9db96fa9df8e35dba7717b753d5417ac6
|
"""Scraper for the Maryland Attorney General
CourtID: ag
Court Short Name: Maryland Attorney General
"""
import datetime
import os
from time import sleep
from lxml import html
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from juriscraper.AbstractSite import logger, phantomjs_executable_path
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import convert_date_string
class Site(OpinionSite):
"""This scraper is strange. The site it temperamental, and the javascript
seems to load successfully on some runs, but not on others. The dates are
also estimated, and the names are actually semi-long summaries. Furthermore,
the site's source is unmanageable, which has prevented us from being able to
create legitimate test/example files for coverage. We have a single example
file that's an empty document skeleton to prevent the test mechanism from
complaining. But it isn't a test providing real coverage.
We are doing the best we can with a bad site.
"""
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.year = datetime.date.today().year
self.domain = 'http://www.marylandattorneygeneral.gov'
self.url = '%s/Pages/Opinions/index.aspx' % self.domain
self.back_scrape_iterable = range(1993, self.year + 1)
self.parent_path_base = '//tbody/tr/td[contains(./text(), "%d")]'
self.parent_path = self.parent_path_base % self.year
self.cell_path = '//tbody[@isloaded="true"]/tr/td[%d]'
self.next_path = '//a[@title="Next"]'
self.driver = False
def _download(self, request_dict={}):
if self.test_mode_enabled():
return [super(Site, self)._download(request_dict)]
trees = self.get_dynamic_html_trees()
if not len(trees):
# No opinions for the current year on the page, so no
# js to load. Return the regular page html and
# extract 0 cases because nothing is there
return [super(Site, self)._download(request_dict)]
return trees
def get_dynamic_html_trees(self):
# Initialize driver
driver = webdriver.PhantomJS(
executable_path=phantomjs_executable_path,
service_log_path=os.path.devnull, # Disable ghostdriver.log
)
driver.get(self.url)
# Find and activate the opinion drop-down for year
try:
date_anchor = driver.find_element_by_xpath('%s/a' % self.parent_path)
except NoSuchElementException:
# Year has no opinions drop-down on page
return []
date_anchor.click()
trees = [self.get_tree_from_driver_dom(driver)]
# Handle pagination if more than 30 results for year
while True:
try:
next_anchor = driver.find_element_by_xpath(self.next_path)
except NoSuchElementException:
# Less than 30 results
break
next_anchor.click()
trees.append(self.get_tree_from_driver_dom(driver))
return trees
def get_tree_from_driver_dom(self, driver):
# Wait for js to load and dom html to update
# Seems stupid, but necessary, and easier
# than loading lots of selenium dependencies
# and using complex WebDriverWait with callbacks
# for attribute to appear, which don't even
# seem to work consistently with the site's
# finicky responses.
sleep(3)
source = driver.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
tree = html.fromstring(source)
tree.make_links_absolute(self.domain)
return tree
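# Hypothetical alternative (untested against this site): the fixed sleep(3) above could
# in principle be replaced with an explicit wait, e.g.
#   from selenium.webdriver.support.ui import WebDriverWait
#   WebDriverWait(driver, 10).until(
#       lambda d: d.find_elements_by_xpath('//tbody[@isloaded="true"]'))
# but, as the class docstring explains, the site's javascript is flaky enough that the
# short sleep was chosen here instead.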
def _get_case_names(self):
names = []
path = self.cell_path % 3
for tree in self.html:
names.extend([cell.text_content().strip() for cell in tree.xpath(path)])
return names
def _get_download_urls(self):
urls = []
path = (self.cell_path % 4) + '/a/@href'
for tree in self.html:
urls.extend([href for href in tree.xpath(path)])
return urls
def _get_case_dates(self):
today = datetime.date.today()
count = len(self._get_case_names())
middle_of_year = convert_date_string('July 2, %d' % self.year)
if self.year == today.year:
# Not a backscraper, assume cases were filed on day scraped.
return [today] * count
else:
# All we have is the year, so estimate the middle most day
return [middle_of_year] * count
def _get_docket_numbers(self):
dockets = []
path = self.cell_path % 1
for tree in self.html:
for cell in tree.xpath(path):
dockets.append(cell.text_content().replace('Unpublished', ''))
return dockets
def _get_precedential_statuses(self):
statuses = []
path = self.cell_path % 1
for tree in self.html:
for cell in tree.xpath(path):
if 'Unpublished' in cell.text_content():
statuses.append('Unpublished')
else:
statuses.append('Published')
return statuses
def _get_date_filed_is_approximate(self):
return ['True'] * len(self.case_names)
def _download_backwards(self, year):
"""Iterate over drop down for each year on the page"""
self.year = year
self.parent_path = self.parent_path_base % year
self.html = self._download()
|
py
|
1a59d7e60881cabe3128f3138a9a72aaaa7378b0
|
# Generated by Django 2.2.6 on 2019-12-28 10:13
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='CustomUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
|
py
|
1a59d847b85d208ff21747b05c539f66cfd7ebb5
|
# Copyright 2015 Rackspace Hosting, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from nova import test
from nova.virt.disk.mount import block
from nova.virt.image import model as imgmodel
class LoopTestCase(test.NoDBTestCase):
    def setUp(self):
        super(LoopTestCase, self).setUp()
        device_path = '/dev/mapper/instances--instance-0000001_disk'
        self.image = imgmodel.LocalBlockImage(device_path)
    def test_get_dev(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        b = block.BlockMount(self.image, tempdir)
        self.assertTrue(b.get_dev())
        self.assertTrue(b.linked)
        self.assertEqual(self.image.path, b.device)
    def test_unget_dev(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        b = block.BlockMount(self.image, tempdir)
        b.unget_dev()
        self.assertIsNone(b.device)
        self.assertFalse(b.linked)
|
py
|
1a59da3fcf7d44becc9b1a77f4c15836514fabfb
|
import binascii
import hashlib
import hmac
import os
import random
import struct
from time import time
import iofree
def pack_uint16(s):
return len(s).to_bytes(2, "big") + s
def sni(host):
return b"\x00\x00" + pack_uint16(pack_uint16(pack_uint16(b"\x00" + host)))
def pack_auth_data(key, session_id):
utc_time = int(time()) & 0xFFFFFFFF
data = struct.pack(">I", utc_time) + os.urandom(18)
data += hmac.new(key + session_id, data, hashlib.sha1).digest()[:10]
return data
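# Illustrative sketch (not part of the original module): pack_auth_data always yields a
# 32-byte block, 4 bytes of UTC time + 18 random bytes + a 10-byte truncated HMAC-SHA1
# keyed with key + session_id. The key and session id below are made up.
def _example_auth_data_layout():
    data = pack_auth_data(b'example-key', b'\x00' * 32)
    assert len(data) == 4 + 18 + 10
    return data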
@iofree.parser
def tls1_2_response(plugin):
tls_version = plugin.tls_version
with memoryview((yield from iofree.read(5))) as tls_plaintext_head:
assert (
tls_plaintext_head[:3] == b"\x16\x03\x03"
), "invalid tls head: handshake(22) protocol_version(3.1)"
length = int.from_bytes(tls_plaintext_head[-2:], "big")
assert length == length & 0x3FFF, f"{length} is over 2^14"
with memoryview((yield from iofree.read(length))) as fragment:
assert fragment[0] == 2, f"expect server_hello(2), but got: {fragment[0]}"
handshake_length = int.from_bytes(fragment[1:4], "big")
server_hello = fragment[4 : handshake_length + 4]
assert server_hello[:2] == tls_version, "expect: server_version(3.3)"
verify_id = server_hello[2:34]
sha1 = hmac.new(
plugin.client.ns.cipher.master_key + plugin.session_id,
verify_id[:-10],
hashlib.sha1,
).digest()[:10]
assert sha1 == verify_id[-10:], "hmac verify failed"
assert server_hello[34] == 32, f"expect 32, but got {server_hello[34]}"
# verify_id = server_hello[35:67]
# sha1 = hmac.new(
# plugin.client.ns.cipher.master_key + plugin.session_id,
# fragment[:-10],
# hashlib.sha1,
# ).digest()[:10]
# assert sha1 == fragment[-10:], "hmac verify failed"
while True:
x = yield from iofree.peek(1)
if x[0] != 22:
break
with memoryview((yield from iofree.read(5))) as ticket_head:
length = int.from_bytes(ticket_head[-2:], "big")
assert length == length & 0x3FFF, f"{length} is over 2^14"
yield from iofree.read(length)
yield from ChangeCipherReader(
plugin, plugin.client.ns.cipher.master_key, plugin.session_id
)
yield from application_data(plugin)
@iofree.parser
def tls1_2_request(plugin):
parser = yield from iofree.get_parser()
tls_version = plugin.tls_version
with memoryview((yield from iofree.read(5))) as tls_plaintext_head:
assert (
tls_plaintext_head[:3] == b"\x16\x03\x01"
), "invalid tls head: handshake(22) protocol_version(3.1)"
length = int.from_bytes(tls_plaintext_head[-2:], "big")
assert length == length & 0x3FFF, f"{length} is over 2^14"
with memoryview((yield from iofree.read(length))) as fragment:
assert fragment[0] == 1, f"expect client_hello(1), but got {fragment[0]}"
handshake_length = int.from_bytes(fragment[1:4], "big")
client_hello = fragment[4 : handshake_length + 4]
assert client_hello[:2] == tls_version, "expect: client_version(3.3)"
verify_id = client_hello[2:34]
# TODO: replay attack detection
gmt_unix_time = int.from_bytes(verify_id[:4], "big")
time_diff = (int(time()) & 0xFFFFFFFF) - gmt_unix_time
assert abs(time_diff) < plugin.time_tolerance, f"expired request: {time_diff}"
session_length = client_hello[34]
assert session_length >= 32, "session length should be >= 32"
session_id = client_hello[35 : 35 + session_length].tobytes()
sha1 = hmac.new(
plugin.server.cipher.master_key + session_id, verify_id[:22], hashlib.sha1
).digest()[:10]
assert verify_id[22:] == sha1, "hmac verify failed"
tail = client_hello[35 + session_length :]
cipher_suites = tail[:2].tobytes()
compression_methods = tail[2:3]
(cipher_suites, compression_methods)
random_bytes = pack_auth_data(plugin.server.cipher.master_key, session_id)
server_hello = (
tls_version
+ random_bytes
+ session_length.to_bytes(1, "big")
+ session_id
+ binascii.unhexlify(b"c02f000005ff01000100")
)
server_hello = b"\x02\x00" + pack_uint16(server_hello)
server_hello = b"\x16" + tls_version + pack_uint16(server_hello)
if random.randint(0, 8) < 1:
ticket = os.urandom((struct.unpack(">H", os.urandom(2))[0] % 164) * 2 + 64)
ticket = struct.pack(">H", len(ticket) + 4) + b"\x04\x00" + pack_uint16(ticket)
server_hello += b"\x16" + tls_version + ticket
change_cipher_spec = b"\x14" + tls_version + b"\x00\x01\x01"
finish_len = random.choice([32, 40])
change_cipher_spec += (
b"\x16"
+ tls_version
+ struct.pack(">H", finish_len)
+ os.urandom(finish_len - 10)
)
change_cipher_spec += hmac.new(
plugin.server.cipher.master_key + session_id, change_cipher_spec, hashlib.sha1
).digest()[:10]
parser.respond(data=server_hello + change_cipher_spec)
yield from ChangeCipherReader(plugin, plugin.server.cipher.master_key, session_id)
def ChangeCipherReader(plugin, key, session_id):
with memoryview((yield from iofree.read(11))) as data:
assert data[0] == 0x14, f"{data[0]} != change_cipher_spec(20) {data.tobytes()}"
assert (
data[1:3] == plugin.tls_version
), f"{data[1:3].tobytes()} != version({plugin.tls_version})"
assert data[3:6] == b"\x00\x01\x01", "bad ChangeCipherSpec"
assert data[6] == 0x16, f"{data[6]} != Finish(22)"
assert (
data[7:9] == plugin.tls_version
), f"{data[7:9]} != version({plugin.tls_version})"
assert data[9] == 0x00, f"{data[9]} != Finish(0)"
verify_len = int.from_bytes(data[9:11], "big")
with memoryview((yield from iofree.read(verify_len))) as verify:
sha1 = hmac.new(
key + session_id, b"".join([data, verify[:-10]]), hashlib.sha1
).digest()[:10]
assert sha1 == verify[-10:], "hmac verify failed"
@iofree.parser
def application_data(plugin):
parser = yield from iofree.get_parser()
while True:
with memoryview((yield from iofree.read(5))) as data:
assert (
data[0] == 0x17
), f"{data[0]} != application_data(23) {data.tobytes()}"
assert (
data[1:3] == plugin.tls_version
), f"{data[1:3].tobytes()} != version({plugin.tls_version})"
size = int.from_bytes(data[3:], "big")
assert size == size & 0x3FFF, f"{size} is over 2^14"
data = yield from iofree.read(size)
parser.respond(result=data)
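# Illustrative note: each record consumed above is framed as
#   0x17 (application_data) + tls_version (2 bytes) + length (2 bytes, below 2**14)
# followed by the payload itself, so a 5-byte header read plus a length-sized read
# recovers exactly one payload chunk per loop iteration.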
|
py
|
1a59dafa5841cd613fa67fd2fdb8ee41ee16f09f
|
"""
Tests that rely on a server running
"""
import base64
import json
import datetime
import os
import pytest
from omnisci import connect, ProgrammingError, DatabaseError
from omnisci.cursor import Cursor
from omnisci._parsers import Description, ColumnDetails
from omnisci.thrift.ttypes import TOmniSciException
# XXX: Make it hashable to silence warnings; see if this can be done upstream
# This isn't a huge deal, but our testing context managers for asserting
# exceptions need hashability
TOmniSciException.__hash__ = lambda x: id(x)
omniscihost = os.environ.get('OMNISCI_HOST', 'localhost')
@pytest.mark.usefixtures("omnisci_server")
class TestIntegration:
def test_connect_binary(self):
con = connect(
user="admin",
password='HyperInteractive',
host=omniscihost,
port=6274,
protocol='binary',
dbname='omnisci',
)
assert con is not None
def test_connect_http(self):
con = connect(
user="admin",
password='HyperInteractive',
host=omniscihost,
port=6278,
protocol='http',
dbname='omnisci',
)
assert con is not None
def test_connect_uri(self):
uri = (
'omnisci://admin:HyperInteractive@{0}:6274/omnisci?'
'protocol=binary'.format(omniscihost)
)
con = connect(uri=uri)
assert con._user == 'admin'
assert con._password == 'HyperInteractive'
assert con._host == omniscihost
assert con._port == 6274
assert con._dbname == 'omnisci'
assert con._protocol == 'binary'
def test_connect_uri_and_others_raises(self):
uri = (
'omnisci://admin:HyperInteractive@{0}:6274/omnisci?'
'protocol=binary'.format(omniscihost)
)
with pytest.raises(TypeError):
connect(username='omnisci', uri=uri)
def test_invalid_sql(self, con):
with pytest.raises(ProgrammingError) as r:
con.cursor().execute("this is invalid;")
r.match("SQL Error:")
def test_nonexistant_table(self, con):
with pytest.raises(DatabaseError) as r:
con.cursor().execute("select it from fake_table;")
r.match("Table 'FAKE_TABLE' does not exist|Object 'fake_table' not")
def test_connection_execute(self, con):
result = con.execute("drop table if exists FOO;")
result = con.execute("create table FOO (a int);")
assert isinstance(result, Cursor)
con.execute("drop table if exists FOO;")
def test_select_sets_description(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select * from stocks")
expected = [
Description('date_', 6, None, None, None, None, True),
Description('trans', 6, None, None, None, None, True),
Description('symbol', 6, None, None, None, None, True),
Description('qty', 1, None, None, None, None, True),
Description('price', 3, None, None, None, None, True),
Description('vol', 3, None, None, None, None, True),
]
assert c.description == expected
c.execute('drop table if exists stocks;')
def test_select_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute(
'select symbol, qty from stocks where symbol = :symbol',
{'symbol': 'GOOG'},
)
result = list(c)
expected = [
('GOOG', 100),
] # noqa
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
parameters = [{'symbol': 'GOOG'}, {'symbol': "RHAT"}]
expected = [[('GOOG', 100)], [('RHAT', 100)]]
query = 'select symbol, qty from stocks where symbol = :symbol'
c = con.cursor()
result = c.executemany(query, parameters)
assert result == expected
c.execute('drop table if exists stocks;')
def test_executemany_parametrized_insert(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c = con.cursor()
c.execute("drop table if exists stocks2;")
# Create table
c.execute('CREATE TABLE stocks2 (symbol text, qty int);')
params = [{"symbol": "GOOG", "qty": 10}, {"symbol": "AAPL", "qty": 20}]
query = "INSERT INTO stocks2 VALUES (:symbol, :qty);"
result = c.executemany(query, params)
assert result == [[], []] # TODO: not sure if this is standard
c.execute("drop table stocks2;")
c.execute('drop table if exists stocks;')
def test_fetchone(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchone()
expected = ('RHAT', 100)
assert result == expected
c.execute('drop table if exists stocks;')
def test_fetchmany(self, con):
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
i1 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','RHAT',100,35.14,1.1);" # noqa
i2 = "INSERT INTO stocks VALUES ('2006-01-05','BUY','GOOG',100,12.14,1.2);" # noqa
c.execute(i1)
c.execute(i2)
c.execute("select symbol, qty from stocks")
result = c.fetchmany()
expected = [('RHAT', 100)]
assert result == expected
c.execute("select symbol, qty from stocks")
result = c.fetchmany(size=10)
expected = [('RHAT', 100), ('GOOG', 100)]
assert result == expected
c.execute('drop table if exists stocks;')
def test_select_dates(self, con):
c = con.cursor()
c.execute('drop table if exists dates;')
c.execute(
'create table dates (date_ DATE, datetime_ TIMESTAMP, '
'time_ TIME);'
)
i1 = (
"INSERT INTO dates VALUES ('2006-01-05','2006-01-01T12:00:00',"
"'12:00:00');"
)
i2 = (
"INSERT INTO dates VALUES ('1901-12-14','1901-12-13T20:45:53',"
"'23:59:00');"
)
c.execute(i1)
c.execute(i2)
result = list(c.execute("select * from dates"))
expected = [
(
datetime.date(2006, 1, 5),
datetime.datetime(2006, 1, 1, 12),
datetime.time(12),
),
(
datetime.date(1901, 12, 14),
datetime.datetime(1901, 12, 13, 20, 45, 53),
datetime.time(23, 59),
),
]
assert result == expected
c.execute('drop table if exists dates;')
class TestExtras:
def test_sql_validate(self, con):
from omnisci.common.ttypes import TTypeInfo
c = con.cursor()
c.execute('drop table if exists stocks;')
create = (
'create table stocks (date_ text, trans text, symbol text, '
'qty int, price float, vol float);'
)
c.execute(create)
q = "select * from stocks"
results = con._client.sql_validate(con._session, q)
col_names = sorted([r.col_name for r in results])
col_types = [r.col_type for r in results]
expected_col_names = [
'date_',
'price',
'qty',
'symbol',
'trans',
'vol',
]
expected_types = [
TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
size=-1,
),
TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
size=-1,
),
TTypeInfo(
type=6,
encoding=4,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=32,
size=-1,
),
TTypeInfo(
type=1,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
size=-1,
),
TTypeInfo(
type=3,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
size=-1,
),
TTypeInfo(
type=3,
encoding=0,
nullable=True,
is_array=False,
precision=0,
scale=0,
comp_param=0,
size=-1,
),
]
assert col_types == expected_types
assert col_names == expected_col_names
|
py
|
1a59db21d580e4ee837180af7a00b6ce8687fc7f
|
#
# Copyright (C) 2014, Zebra Technologies
# Authors: Matt Hooks <[email protected]>
# Zachary Lorusso <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import traceback
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
try:
import asyncio
except ImportError:
import trollius as asyncio
loop = asyncio.get_event_loop()
class AsyncioDispatcher(AbstractTransportDispatcher):
"""AsyncioDispatcher based on asyncio event loop"""
def __init__(self, *args, **kwargs):
AbstractTransportDispatcher.__init__(self)
self.__transportCount = 0
if 'timeout' in kwargs:
self.setTimerResolution(kwargs['timeout'])
self.loopingcall = None
@asyncio.coroutine
def handle_timeout(self):
while True:
yield asyncio.From(asyncio.sleep(self.getTimerResolution()))
self.handleTimerTick(loop.time())
def runDispatcher(self, timeout=0.0):
if not loop.is_running():
try:
loop.run_forever()
except KeyboardInterrupt:
raise
except Exception:
raise PySnmpError(';'.join(traceback.format_exception(*sys.exc_info())))
def registerTransport(self, tDomain, transport):
if self.loopingcall is None and self.getTimerResolution() > 0:
self.loopingcall = asyncio.async(self.handle_timeout())
AbstractTransportDispatcher.registerTransport(
self, tDomain, transport
)
self.__transportCount = self.__transportCount + 1
def unregisterTransport(self, tDomain):
t = AbstractTransportDispatcher.getTransport(self, tDomain)
if t is not None:
AbstractTransportDispatcher.unregisterTransport(self, tDomain)
self.__transportCount = self.__transportCount - 1
# The last transport has been removed, stop the timeout
if self.__transportCount == 0 and not self.loopingcall.done():
self.loopingcall.cancel()
self.loopingcall = None
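# Hypothetical usage sketch (names outside this file are from memory and may differ
# between pysnmp versions): the dispatcher is normally attached to an SNMP engine,
# roughly
#   snmpEngine.registerTransportDispatcher(AsyncioDispatcher())
# after which runDispatcher() (or an asyncio loop that is already running) drives both
# socket I/O and the handle_timeout() coroutine scheduled in registerTransport().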
# Trollius or Tulip?
if not hasattr(asyncio, "From"):
exec(
"""\
@asyncio.coroutine
def handle_timeout(self):
while True:
yield from asyncio.sleep(self.getTimerResolution())
self.handleTimerTick(loop.time())
AsyncioDispatcher.handle_timeout = handle_timeout\
"""
)
|
py
|
1a59db9f1e2f092dd580abcf5c5c75ccca61623a
|
import discord, asyncio, random, time
from . import world, worldToImage, rpglang, menus
from .datatypes import RPGUser, BiomeType, LocationType, Biome, Location, ItemType, Item, Weapon, WeaponType, WAttribute, Chunk
from libs import modutil
from ..rpg import rpgcmd
from discord.ext import commands
import discord
def check_if_user_is_loaded(ctx):
return RPGUser.isLoaded(ctx.message.author.id)
@rpgcmd.command(name="user")
async def userSubCommand(ctx, *args):
_u = RPGUser.get(args[0])
_toSend = "```\n---- USER ----\nID: {id}\nPosition: ({x}, {y})\nFighters: {fighters}\nTutorialDone: {tutorialState}\nInventory: {inv}```".format(
id = _u.id,
x = _u.x,
y = _u.y,
fighters = ", ".join(list(map(lambda f: f.name, _u.fighters))),
tutorialState = str(_u.tutorialDone),
inv = ", ".join(list(map(lambda f: f.getFullName(), _u.inventory))) if len(_u.inventory) > 0 else "(Nothing)"
)
await ctx.send(_toSend)
@rpgcmd.command(name="loc")
async def infoSubCommand(ctx, *args):
_x = int(args[0])
_y = int(args[1])
_b = world.getPos(_x, _y)
_toSend = "```\n---- BIOME ----\nType: {type}\nx, y: ({x}, {y})\nLoot: {loot}\nLocation Connected: {location}".format(
type = _b.stype,
x = _b.x,
y = _b.y,
loot = ", ".join(list(map(lambda l: l.getType().name,_b.loot))) if len(_b.loot) > 0 else "(Nothing)",
location = _b.locationConnected.getType().systemName if _b.locationConnected != None else "None"
)
if _b.locationConnected != None:
_l = _b.locationConnected
_toSend = _toSend + "\n\n---- LOCATION ----\nType: {type}\nrecursion: {recursion}\nConnections: {connections}\nLoot: {loot}".format(
type = _l.stype,
recursion = _l.recursion,
connections = ", ".join(list(map(lambda l: l.getType().systemName, _l.connections))) if len(_l.connections) > 0 else "(Nothing)",
loot = ", ".join(list(map(lambda l: l.getType().systemName, _l.loot))) if len(_l.loot) > 0 else "(Nothing)"
)
await ctx.send(_toSend + "```")
@rpgcmd.command(name="tp")
async def tpSubCommand(ctx, *args):
if len(args) == 1: # tp player
user = RPGUser.get(ctx.message.author.id)
_x = int(RPGUser.get(args[0]).x)
_y = int(RPGUser.get(args[0]).y)
elif len(args) == 2: # tp x y
_x = int(args[0])
_y = int(args[1])
user = RPGUser.get(ctx.message.author.id)
elif len(args) == 3: # tp player x y
user = RPGUser.get(args[0])
_x = int(args[1])
_y = int(args[2])
else:
await ctx.send("`Usage: rpg tp <player [x y] / x y>`")
return
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
user.setPos(_x, _y)
await ctx.send("Teleported {} to ({}, {}).".format(user.getName(), _x, _y))
@rpgcmd.group(invoke_without_command=True, name="data")
async def datacmd(ctx):
await ctx.send("`Usage: rpg data <biome/location/weapon/wattribute> [systemname]`")
@datacmd.command(name="biome")
async def data_biomecmd(ctx, *args):
if len(args) > 0:
_x = list(filter(lambda n: n.systemName == args[0], BiomeType.all.values()))[0]
_toSend = "```\n--------- Biome ---------\nname: {name}\npossibleBiomesNear: {possibleBiomesNear}\npossibleLocations: {possibleLocations}\ngender: {gender}\nemoji: {emoji}```".format(
name = _x.name,
possibleBiomesNear = ",".join(list(map(lambda n: n.name, _x.possibleBiomesNear.keys()))),
possibleLocations = ",".join(list(map(lambda n: n.name, _x.possibleLocations))),
gender = "Female" if _x.gender else "Male",
emoji = _x.emoji)
await ctx.send(_toSend)
else:
await ctx.send("```\nBiomes:\n{}```".format("\n".join(list(map(lambda b: b.systemName,BiomeType.all.values())))))
@datacmd.command(name="location")
async def data_locationcmd(ctx, *args):
await ctx.send("```\nLocations:\n{}```".format("\n".join(list(map(lambda b: b.systemName,LocationType.all.values())))))
@datacmd.command(name="weapon")
async def data_weaponcmd(ctx, *args):
await ctx.send("```\nWeapons:\n{}```".format("\n".join(list(map(lambda b: b.systemName,WeaponType.all.values())))))
@datacmd.command(name="wattribute")
async def data_wattcmd(ctx, *args):
if len(args) > 0:
_x = list(filter(lambda n: n.nameM == args[0], WAttribute.all))[0]
_toSend = "```\n--------- wAttribute ---------\nnameM: {nameM}\nnameF: {nameF}\ntype: {type}\nclassification: {classification}\nattackMod: {attackMod}\nagilityMod: {agilityMod}```".format(
nameM = _x.nameM,
nameF = _x.nameF,
type = _x.type,
classification = _x.classification,
attackMod = _x.attackMod,
agilityMod = _x.agilityMod)
await ctx.send(_toSend)
else:
await ctx.send("```\nWeapon Adjectives:\n{}```".format("\n".join(list(map(lambda b: b.nameM,WAttribute.all)))))
# Sends a map of the world to a channel.
@rpgcmd.command(name="map")
async def worldSubCommand(ctx, *args):
user = RPGUser.get(ctx.message.author.id)
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
cx = int(args[0]) * Chunk.size
cy = int(args[1]) * Chunk.size
ux = user.x
uy = user.y
worldmap = ""
for iy in range(cx, cx + Chunk.size):
for ix in range(cy, cy + Chunk.size):
if ux == ix and uy == iy:
worldmap += "X"
elif world.getPos(ix,iy).locationConnected != None:
worldmap += "C"
else:
worldmap += world.getPos(ix,iy).getType().emoji
worldmap += "\n"
worldmap = worldmap.rstrip("\n")
worldToImage.represent(worldmap, modutil.absolutePath + "/map.png")
await ctx.send(file=discord.File(modutil.absolutePath + "/map.png"))
@rpgcmd.command(name="save")
async def saveSubCommand(ctx, *args):
RPGUser.saveAll()
await ctx.send("Saved!")
@rpgcmd.command(name="inv")
async def invSubCommand(ctx):
user = RPGUser.get(ctx.message.author.id)
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
await menus.showInventoryMenu(ctx, user)
@rpgcmd.command(name="s")
async def startSubCommand(ctx):
user = RPGUser.get(ctx.message.author.id)
if user==None:
await ctx.send(rpglang.getl("USER_NOT_REGISTERED"))
return
if not user.tutorialDone:
await menus.showTutorialMenu(ctx, user)
else:
await menus.showActionMenu(ctx, user, rpglang.getl("CONTINUE_ADVENTURE"))
|
py
|
1a59dbbd90054e54f4c4a2a09b8e2310b3d7897b
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tensorflow as tf
from bigdl.nano.tf.keras.training_utils import TrainingUtils
class Sequential(TrainingUtils, tf.keras.Sequential):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
py
|
1a59dbeb8cf2eabf3f30b5ee465ce30bc73e92af
|
# Generated by Django 2.1.5 on 2019-02-06 14:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20190206_1435'),
]
operations = [
migrations.AlterField(
model_name='exam',
name='allowed_attendance',
field=models.PositiveIntegerField(default=70),
),
migrations.AlterField(
model_name='exam',
name='attendance_fine',
field=models.PositiveIntegerField(default=600),
),
migrations.AlterField(
model_name='exam',
name='fees_per_credit',
field=models.PositiveIntegerField(default=50),
),
migrations.AlterField(
model_name='exam',
name='fined_attendance',
field=models.PositiveIntegerField(default=60),
),
]
|
py
|
1a59dc471a4efc6c803b9c682f823c9a7d03494b
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.external_link import ExternalLink
from ..one_drive_object_base import OneDriveObjectBase
class NotebookLinks(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def one_note_client_url(self):
"""
Gets and sets the oneNoteClientUrl
Returns:
:class:`ExternalLink<onedrivesdk.model.external_link.ExternalLink>`:
The oneNoteClientUrl
"""
if "oneNoteClientUrl" in self._prop_dict:
if isinstance(self._prop_dict["oneNoteClientUrl"], OneDriveObjectBase):
return self._prop_dict["oneNoteClientUrl"]
else :
self._prop_dict["oneNoteClientUrl"] = ExternalLink(self._prop_dict["oneNoteClientUrl"])
return self._prop_dict["oneNoteClientUrl"]
return None
@one_note_client_url.setter
def one_note_client_url(self, val):
self._prop_dict["oneNoteClientUrl"] = val
@property
def one_note_web_url(self):
"""
Gets and sets the oneNoteWebUrl
Returns:
:class:`ExternalLink<onedrivesdk.model.external_link.ExternalLink>`:
The oneNoteWebUrl
"""
if "oneNoteWebUrl" in self._prop_dict:
if isinstance(self._prop_dict["oneNoteWebUrl"], OneDriveObjectBase):
return self._prop_dict["oneNoteWebUrl"]
else :
self._prop_dict["oneNoteWebUrl"] = ExternalLink(self._prop_dict["oneNoteWebUrl"])
return self._prop_dict["oneNoteWebUrl"]
return None
@one_note_web_url.setter
def one_note_web_url(self, val):
self._prop_dict["oneNoteWebUrl"] = val
|
py
|
1a59dc991a2b43327227ad103b3eeb5b622cf640
|
import numpy as np
import sys
from datetime import datetime
from tools_LT import get_info, get_grads_all, get_evar4d
def main( exp, stime, typ , tlev, vname_list=["U"], nens=80 ):
INFO = get_info( exp )
if typ != "fcst":
nens = 1
for vname in vname_list:
print(vname)
var4d = get_evar4d(INFO, vname=vname, tlev=tlev, typ=typ,
stime=stime, member=nens )
print("Done", vname, "t:", tlev, var4d.shape, np.max(var4d), np.min(var4d) )
exp = "2000m_DA_1022_FIR2km_N"
stime = datetime(2001,1,1,1,10)
typ = "anal"
tmax = 2
nens = 80
typ = "fcst"
exp = "2000m_NODA_1022_FIR2km_N"
stime = datetime(2001,1,1,1,0)
tmax = 13
nens = 1
typ = "fcst"
exp = "2000m_DA_0306"
#exp = "2000m_DA_0306_R_FP_DEBUG32_LOC90km_SINGLE"
#
#exp = "2000m_DA_0306_R_FP_DEBUG32_LOC90km_SINGLE0.1"
#exp = "2000m_DA_0306_R_FP_DEBUG32_LOC90km_SINGLE0.1_I98_J106"
exp = "2000m_DA_0306_FP_M32_LOC90km"
exp = "2000m_DA_0306_FP_M32_LOC90km_HT8"
exp = "2000m_DA_0306_FP_M32_LOC90km_QC5"
exp = "2000m_DA_0306_FP_M32_LOC90km_ZMAX23"
exp = "2000m_DA_0306_FP_M32_LOC90km"
#exp = "2000m_DA_0306_NOFP"
exp = "2000m_NODA_0306"
exp = "2000m_DA_0601"
exp = "2000m_DA_0601"
exp = "2000m_DA_0601_FP_M32_LOC90km"
exp = "2000m_DA_0601_FP_M01_LOC90km"
exp = "2000m_DA_0601_FP_M32_LOC90km_TEST"
exp = "2000m_DA_0601_FP_M32_LOC30km_TEST"
exp = "2000m_DA_0601_FP_M32_LOC30km_TEST2"
exp = "2000m_DA_0601_FP_M32_LOC30km"
exp = "2000m_DA_0601_FP_M32_LOC30km_LOG"
exp = "2000m_DA_0601_FP_M160_LOC30km"
exp = "2000m_DA_0601_FP_M160_LOC30km_POB"
exp = "2000m_DA_0601_FP_M160_LOC30km_NOB"
exp = "2000m_DA_0601_FP_M600_M32_LOC30km_NOB"
exp = "2000m_DA_0601_FP_M600_M160_LOC30km_NOB"
exp = "2000m_DA_0601_FP_M600_M160_LOC30km_NOB_LOG"
exp = "2000m_DA_0601_FP_GT"
exp = "2000m_DA_0601_FP_GT_POB"
#exp = "2000m_DA_0601_FP_GT_NOB"
#exp = "2000m_DA_0601_NOFP"
exp = "2000m_DA_0601"
exp = "2000m_DA_0723_FP"
#exp = "2000m_DA_0723_NOFP"
exp = "2000m_DA_0723_FP_M160"
exp = "2000m_DA_0723_FP_NOB"
exp = "2000m_DA_0723_FP_NOB_OBERR0.1"
exp = "2000m_DA_0723_FP_NOB_30km"
exp = "2000m_DA_0723"
exp = "2000m_DA_0723_FP_30min_NOB"
exp = "2000m_DA_0723_FP_30min"
exp = "2000m_DA_0723_NOFP_30min"
exp = "2000m_DA_0723_FP_30min_HT8"
exp = "2000m_DA_0723_FP_30min_M64"
exp = "2000m_DA_0723_FP_30min_NOB_M64"
exp = "2000m_DA_0723_FP_30min_M160"
exp = "2000m_DA_0723_FP_30min_M160_POB"
exp = "2000m_DA_0723_FP_30min_M160_GE3"
exp = "2000m_DA_0723_FP_30min_LOC30km"
exp = "2000m_NODA_0723"
exp = "2000m_DA_0723_FP_30min_LOC10km"
exp = "2000m_DA_0723_FP_30min_LOC30km_X175km_Y183km"
exp = "2000m_DA_0723_FP_30min_LOC90km_X175km_Y183km_LOG"
exp = "2000m_DA_0723_FP_30min_LOC90km_X175km_Y183km"
exp = "2000m_DA_0723_FP_30min_LOC90km_LOC2D"
exp = "2000m_DA_0723_FP_30min_LOC10km_X167km_Y223km"
exp = "2000m_DA_0723_FP_30min_LOC10km"
exp = "2000m_DA_0723_FP_30min_LOC10km_LOG"
exp = "2000m_DA_0723_FP_30min_LOC10km_VLOCW20"
exp = "2000m_DA_0723_FP_30min_LOC10km_VLOC30km"
exp = "2000m_DA_0723_FP_30min_LOC30km_X183km_Y199km"
exp = "2000m_DA_0723_Z20km_FP_30min_LOC30km"
exp = "2000m_DA_0723_FP_30min_LOC30km_COERR0.2"
exp = "2000m_DA_0723_FP_30min_LOC20km"
exp = "2000m_DA_0723_FP_30min_LOC20km_X159km_Y231km"
exp = "2000m_DA_0723_FP_30min_LOC20km_X159km_Y207km"
exp = "2000m_DA_0723_FP_30min_LOC20km_M240"
exp = "2000m_DA_0723_FP_30min_LOC20km_M240_NOG"
exp = "2000m_DA_0723_FP_30min_LOC20km_M240_NOG_guess"
exp = "2000m_DA_0723_FP_30min_LOC20km_M240_NOCLROBS"
stime = datetime(2001,1,1,1,40)
stime = datetime(2001,1,1,1,30)
stime = datetime( 2001, 1, 1, 1, 30 )
stime = datetime( 2001, 1, 1, 2, 0, 0 )
#stime = datetime( 2001, 1, 1, 1, 0 )
tmax = 19
tmax = 7
#tmax = 13
#tmax = 3
#nens = 80
#tmax = 1
nens = 0 # mean only
#nens = 320
typ = "fcst"
vname_list = ["U", "V", "W", "T", "P", "QV",
"QC", "QR", "QI", "QS", "QG",
"CC", "CR", "CI", "CS", "CG",
"FP", "EX", "EY", "EZ",
]
#vname_list = ["FP", ]
for tlev in range( 0, tmax ):
main( exp, stime, typ, tlev, vname_list=vname_list, nens=nens )
|
py
|
1a59def8f4b532194dcaef21fcf1d34aad547a3a
|
import argparse
import logging
import os
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from networks.vit_seg_modeling import VisionTransformer as ViT_seg
from networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg
from trainer import trainer_synapse
from utils import Params
params=Params("./params.json")
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str,
default='../data/Synapse/train_npz', help='root dir for data')
parser.add_argument('--dataset', type=str,
default='Synapse', help='experiment_name')
parser.add_argument('--list_dir', type=str,
default='./lists/lists_Synapse', help='list dir')
parser.add_argument('--num_classes', type=int,
default=14, help='output channel of network')
parser.add_argument('--max_iterations', type=int,
                    default=30000, help='maximum iteration number to train')
parser.add_argument('--max_epochs', type=int,
default=150, help='maximum epoch number to train')
parser.add_argument('--batch_size', type=int,
default=2, help='batch_size per gpu')
parser.add_argument('--n_gpu', type=int, default=1, help='total gpu')
parser.add_argument('--deterministic', type=int, default=1,
help='whether use deterministic training')
parser.add_argument('--base_lr', type=float, default=0.01,
help='segmentation network learning rate')
parser.add_argument('--img_size', type=int,
default=224, help='input patch size of network input')
parser.add_argument('--seed', type=int,
default=1234, help='random seed')
parser.add_argument('--n_skip', type=int,
                    default=3, help='number of skip-connections to use')
parser.add_argument('--vit_name', type=str,
default='R50-ViT-B_16', help='select one vit model')
parser.add_argument('--vit_patches_size', type=int,
default=16, help='vit_patches_size, default is 16')
args = parser.parse_args()
if __name__ == "__main__":
if not args.deterministic:
cudnn.benchmark = True
cudnn.deterministic = False
else:
cudnn.benchmark = False
cudnn.deterministic = True
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
#torch.cuda.manual_seed(args.seed)
dataset_name = args.dataset
dataset_config = {
'Synapse': {
'root_path': '../data/Synapse/train_npz',
'list_dir': './lists/lists_Synapse',
'num_classes': params.num_classes,
},
}
args.num_classes = dataset_config[dataset_name]['num_classes']
args.root_path = dataset_config[dataset_name]['root_path']
args.list_dir = dataset_config[dataset_name]['list_dir']
args.is_pretrain = True
args.exp = 'TU_' + dataset_name + str(args.img_size)
snapshot_path = "../model/{}/{}".format(args.exp, 'TU')
snapshot_path = snapshot_path + '_pretrain' if args.is_pretrain else snapshot_path
snapshot_path += '_' + args.vit_name
snapshot_path = snapshot_path + '_skip' + str(args.n_skip)
snapshot_path = snapshot_path + '_vitpatch' + str(args.vit_patches_size) if args.vit_patches_size!=16 else snapshot_path
snapshot_path = snapshot_path+'_'+str(args.max_iterations)[0:2]+'k' if args.max_iterations != 30000 else snapshot_path
snapshot_path = snapshot_path + '_epo' +str(args.max_epochs) if args.max_epochs != 30 else snapshot_path
snapshot_path = snapshot_path+'_bs'+str(args.batch_size)
snapshot_path = snapshot_path + '_lr' + str(args.base_lr) if args.base_lr != 0.01 else snapshot_path
snapshot_path = snapshot_path + '_'+str(args.img_size)
snapshot_path = snapshot_path + '_s'+str(args.seed) if args.seed!=1234 else snapshot_path
if not os.path.exists(snapshot_path):
os.makedirs(snapshot_path)
config_vit = CONFIGS_ViT_seg[args.vit_name]
config_vit.n_classes = args.num_classes
config_vit.n_skip = args.n_skip
if args.vit_name.find('R50') != -1:
config_vit.patches.grid = (int(args.img_size / args.vit_patches_size), int(args.img_size / args.vit_patches_size))
net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes)#.cuda()
if torch.cuda.is_available():
net=net.cuda()
net.load_from(weights=np.load(config_vit.pretrained_path))
trainer = {'Synapse': trainer_synapse,}
trainer[dataset_name](args, net, snapshot_path)
|
py
|
1a59dfbce99ca773f87ec8404f4d0dd3bdb122ac
|
from numpy import array, exp, linspace, sqrt, pi
import matplotlib.pyplot as plt
# Suppose we have the following dataset, which we believe is described by a
# Gaussian peak plus a constant background. Our goal in this example is to
# infer the area of the Gaussian.
x_data = [0.00, 0.80, 1.60, 2.40, 3.20, 4.00, 4.80, 5.60,
6.40, 7.20, 8.00, 8.80, 9.60, 10.4, 11.2, 12.0]
y_data = [2.473, 1.329, 2.370, 1.135, 5.861, 7.045, 9.942, 7.335,
3.329, 5.348, 1.462, 2.476, 3.096, 0.784, 3.342, 1.877]
y_error = [1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1.]
plt.errorbar(x_data, y_data, yerr=y_error, ls='dashed', marker='D', c='red', markerfacecolor='none')
plt.ylabel('y')
plt.xlabel('x')
plt.grid()
plt.show()
# The first step is to implement our model. For simple models like this one,
# this can be done using just a function, but as models become more complex
# it becomes useful to build them as classes (a function-based sketch is shown
# after the class below).
class PeakModel(object):
def __init__(self, x_data):
"""
The __init__ should be used to pass in any data which is required
by the model to produce predictions of the y-data values.
"""
self.x = x_data
def __call__(self, theta):
return self.forward_model(self.x, theta)
@staticmethod
def forward_model(x, theta):
"""
The forward model must make a prediction of the experimental data we would expect to measure
        given a specific set of model parameters 'theta'.
"""
# unpack the model parameters
area, width, center, background = theta
# return the prediction of the data
z = (x - center) / width
gaussian = exp(-0.5*z**2)/(sqrt(2*pi)*width)
return area*gaussian + background
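# --- Editor's illustrative sketch (not part of the original example) ----------
# As noted above, a model this simple could equally be written as a plain
# function that closes over the data; the class form above simply scales better
# as models grow. This function is assumed equivalent to PeakModel(x_data):
def peak_model_function(theta, x=array(x_data)):
    area, width, center, background = theta
    z = (x - center) / width
    return area * exp(-0.5 * z**2) / (sqrt(2 * pi) * width) + background
# ------------------------------------------------------------------------------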
# Inference-tools has a variety of Likelihood classes which allow you to easily construct a
# likelihood function given the measured data and your forward-model.
from inference.likelihoods import GaussianLikelihood
likelihood = GaussianLikelihood(y_data=y_data, sigma=y_error, forward_model=PeakModel(x_data))
# Instances of the likelihood classes can be called as functions, and return the log-likelihood
# when passed a vector of model parameters:
initial_guess = array([10., 2., 5., 2.])
guess_log_likelihood = likelihood(initial_guess)
print(guess_log_likelihood)
# We could at this stage pair the likelihood object with an optimiser in order to obtain
# the maximum-likelihood estimate of the parameters. In this example however, we want to
# construct the posterior distribution for the model parameters, and that means we need
# a prior.
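# --- Editor's illustrative sketch (not part of the original example) ----------
# If the maximum-likelihood route mentioned above were wanted instead, one
# minimal option (assuming scipy is installed) is to minimise the negative
# log-likelihood, since `likelihood(theta)` returns the log-likelihood:
from scipy.optimize import minimize
mle_result = minimize(lambda theta: -likelihood(theta), initial_guess, method="Nelder-Mead")
print("approximate maximum-likelihood parameters:", mle_result.x)
# ------------------------------------------------------------------------------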
# The inference.priors module contains classes which allow for easy construction of
# prior distributions across all model parameters.
from inference.priors import ExponentialPrior, UniformPrior, JointPrior
# If we want different model parameters to have different prior distributions, as in this
# case where we give three variables an exponential prior and one a uniform prior, we first
# construct each type of prior separately:
prior_components = [
ExponentialPrior(beta=[50., 20., 20.], variable_indices=[0, 1, 3]),
UniformPrior(lower=0., upper=12., variable_indices=[2])
]
# Now we use the JointPrior class to combine the various components into a single prior
# distribution which covers all the model parameters.
prior = JointPrior(components=prior_components, n_variables=4)
# As with the likelihood, prior objects can also be called as functions to return a
# log-probability value when passed a vector of model parameters. We can also draw
# samples from the prior directly using the sample() method:
prior_sample = prior.sample()
print(prior_sample)
# The likelihood and prior can be easily combined into a posterior distribution
# using the Posterior class:
from inference.posterior import Posterior
posterior = Posterior(likelihood=likelihood, prior=prior)
# Now we have constructed a posterior distribution, we can sample from it
# using Markov-chain Monte-Carlo (MCMC).
# The inference.mcmc module contains implementations of various MCMC sampling algorithms.
# Here we import the PcaChain class and use it to create a Markov-chain object:
from inference.mcmc import PcaChain
chain = PcaChain(posterior=posterior, start=initial_guess)
# We generate samples by advancing the chain by a chosen number of steps using the advance method:
chain.advance(25000)
# we can check the status of the chain using the plot_diagnostics method:
chain.plot_diagnostics()
# The burn-in (how many samples from the start of the chain are discarded)
# can be chosen by setting the burn attribute of the chain object:
chain.burn = 5000
# we can get a quick overview of the posterior using the matrix_plot method
# of chain objects, which plots all possible 1D & 2D marginal distributions
# of the full parameter set (or a chosen sub-set).
chain.matrix_plot(labels=['area', 'width', 'center', 'background'])
# We can easily estimate 1D marginal distributions for any parameter
# using the get_marginal method:
area_pdf = chain.get_marginal(0)
area_pdf.plot_summary(label='Gaussian area')
# We can assess the level of uncertainty in the model predictions by passing each sample
# through the forward-model and observing the distribution of model predictions that result:
# generate an axis on which to evaluate the model
x_fits = linspace(0, 12, 500)
# get the sample
sample = chain.get_sample()
# pass each through the forward model
curves = array([PeakModel.forward_model(x_fits, theta) for theta in sample])
# We could plot the predictions for each sample all on a single graph, but this is
# often cluttered and difficult to interpret.
# A better option is to use the hdi_plot function from the plotting module to plot
# highest-density intervals for each point where the model is evaluated:
from inference.plotting import hdi_plot
plt.figure(figsize=(8, 5))
hdi_plot(x_fits, curves, intervals=[0.68, 0.95])
# plot the MAP estimate (the sample with the single highest posterior probability)
plt.plot(x_fits, PeakModel.forward_model(x_fits, chain.mode()), ls='dashed', lw=3, c='C0', label='MAP estimate')
# build the rest of the plot
plt.errorbar(x_data, y_data, yerr=y_error, linestyle='none', c='red', label='data',
marker='D', markerfacecolor='none', markeredgewidth=1.5, markersize=6)
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
|
py
|
1a59e030bcf67dc551c399fbae4c1e98030958eb
|
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
from datetime import datetime
import dateutil.relativedelta
from plotly.subplots import make_subplots
class marketcapViewClass:
def getMarketcapContent(self, data, bgImage):
content = [dbc.Modal([dbc.ModalHeader("Info DefiChain Market Cap"),
dbc.ModalBody(self.getMarketcapExplanation()),
dbc.ModalFooter(dbc.Button("close", id="closeInfoMarketcap", className="ml-auto"))],
id="modalMarketcap", size='xl'),
html.Div(id='hidden', style = {'display':'none'}),
dbc.Card(dbc.CardBody([html.H4(['DefiChain Market Cap']),
html.Table([html.Tr([html.Td('Select currency for Market Cap representation:'),
html.Td(dcc.Dropdown(id='marketCapCurrencySelection', options=[{'label': 'USD', 'value': 'USD'},
{'label': 'BTC', 'value': 'BTC'}],
value='USD', clearable=False, style=dict(width='150px', verticalAlign="bottom")))])]),
dbc.Col(dcc.Graph(config={'displayModeBar': False}, id='figureMarketcap')),
dbc.Row(dbc.Col(dbc.Button("Info/Explanation", id="openInfoMarketcap")))
]))]
return content
@staticmethod
def createMarketCapFig(data, selection, bgImage):
figMarketcap = make_subplots(
rows=1, cols=1,
vertical_spacing=0.15,
row_width=[1], # from bottom to top
specs=[[{}]],
shared_xaxes=True,
subplot_titles=([]))
if selection == 'BTC':
columnName = 'marketCapBTC'
yAxisLabel = 'Market Cap in BTC'
hoverTemplateRepresenation = '%{y:,.2f}BTC'
else:
columnName = 'marketCapUSD'
yAxisLabel = 'Market Cap in $'
hoverTemplateRepresenation = '$%{y:,.0f}'
lastValidDate = datetime.strptime(data[columnName].dropna().index.values[-1], '%Y-%m-%d')
date2MonthsBack = lastValidDate - dateutil.relativedelta.relativedelta(months=2)
trace_marketcap = dict(type='scatter', name='Market Cap',
x=data[columnName].dropna().index.values, y=data[columnName].dropna().values,
mode='lines', line=dict(color='#ff00af'), line_width=2, hovertemplate=hoverTemplateRepresenation)
figMarketcap.add_trace(trace_marketcap, 1, 1)
figMarketcap.update_yaxes(title_text=yAxisLabel, tickformat=",.0f", gridcolor='#6c757d', color='#6c757d', zerolinecolor='#6c757d', row=1,
col=1) # ,range=[-50, 200]
figMarketcap.update_xaxes(title_text="Date", gridcolor='#6c757d', color='#6c757d', zerolinecolor='#6c757d',
range=[date2MonthsBack.strftime('%Y-%m-%d'), lastValidDate], row=1, col=1)
# Add range slider
figMarketcap.update_layout(xaxis=dict(
rangeselector=dict(
buttons=list([dict(count=30, label="30d", step="day", stepmode="backward"),
dict(count=2, label="2m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="YTD", step="year", stepmode="todate"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all")])),
rangeslider=dict(visible=False),
type="date"))
# add background picture
figMarketcap.add_layout_image(dict(source=bgImage, xref="paper", yref="paper", x=0.5, y=0.5, sizex=0.6, sizey=0.6, xanchor="center", yanchor="middle", opacity=0.2))
figMarketcap.update_layout(margin={"t": 60, "l": 0, "b": 0, 'r': 0},
hovermode='x unified',
hoverlabel=dict(font_color="#6c757d",
bgcolor='#ffffff', ),
legend=dict(orientation="h",
yanchor="top",
y=-0.12,
xanchor="right",
x=1),
)
figMarketcap.layout.plot_bgcolor = '#ffffff' # background plotting area
figMarketcap.layout.paper_bgcolor = 'rgba(0,0,0,0)' # background around plotting area
figMarketcap.layout.legend.font.color = '#6c757d' # font color legend
return figMarketcap
@staticmethod
def getMarketcapExplanation():
mcCardExplanation = [html.P(['The market cap of a cryptocurrency coin is the product of the circulating coin amount and the coin price. It is used to compare coins against each other, '
                                     'because the price alone has no meaning. Besides the commonly used USD representation, you can also choose a BTC representation. Due to the strong correlation '
                                     'of DFI and BTC, this could give more insight into the development of DefiChain.'], style={'text-align': 'justify'}),
html.P([html.B('Hint:'),' The presented diagrams are interactive. You can zoom in (select range with mouse) and rescale (double-click in diagram) as you like.'
' For specific questions it could be helpful to only show a selection of the available data. To exclude entries from the graph click on the corresponding legend entry.'],
style={'text-align': 'justify', 'fontSize':'0.7rem','color':'#6c757d'})
]
return mcCardExplanation
|
py
|
1a59e066879cc30beaed1c2b8c0b4c8a9efa6d1f
|
""" define the IntervalIndex """
from operator import le, lt
import textwrap
from typing import Any, Optional, Tuple, Union
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
from pandas._libs.interval import Interval, IntervalMixin, IntervalTree
from pandas._libs.tslibs import Timedelta, Timestamp, to_offset
from pandas._typing import AnyArrayLike, Label
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.util._exceptions import rewrite_exception
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_categorical_dtype,
is_datetime64tz_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import isna
from pandas.core.algorithms import take_1d
from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs
import pandas.core.common as com
from pandas.core.indexers import is_valid_positional_slice
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
InvalidIndexError,
_index_shared_docs,
default_pprint,
ensure_index,
maybe_extract_name,
)
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.extension import ExtensionIndex, inherit_names
from pandas.core.indexes.multi import MultiIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
from pandas.core.ops import get_op_result_name
from pandas.tseries.offsets import DateOffset
_VALID_CLOSED = {"left", "right", "both", "neither"}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(
klass="IntervalIndex",
qualname="IntervalIndex",
target_klass="IntervalIndex or list of Intervals",
name=textwrap.dedent(
"""\
name : object, optional
Name to be stored in the index.
"""
),
)
)
def _get_next_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label + np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label + 1
elif is_float_dtype(dtype):
return np.nextafter(label, np.infty)
else:
raise TypeError(f"cannot determine next label for type {repr(type(label))}")
def _get_prev_label(label):
dtype = getattr(label, "dtype", type(label))
if isinstance(label, (Timestamp, Timedelta)):
dtype = "datetime64"
if is_datetime_or_timedelta_dtype(dtype) or is_datetime64tz_dtype(dtype):
return label - np.timedelta64(1, "ns")
elif is_integer_dtype(dtype):
return label - 1
elif is_float_dtype(dtype):
return np.nextafter(label, -np.infty)
else:
raise TypeError(f"cannot determine next label for type {repr(type(label))}")
def _new_IntervalIndex(cls, d):
"""
This is called upon unpickling, rather than the default which doesn't have
arguments and breaks __new__.
"""
return cls.from_arrays(**d)
class SetopCheck:
"""
This is called to decorate the set operations of IntervalIndex
to perform the type check in advance.
"""
def __init__(self, op_name):
self.op_name = op_name
def __call__(self, setop):
def func(intvidx_self, other, sort=False):
intvidx_self._assert_can_do_setop(other)
other = ensure_index(other)
if not isinstance(other, IntervalIndex):
result = getattr(intvidx_self.astype(object), self.op_name)(other)
if self.op_name in ("difference",):
result = result.astype(intvidx_self.dtype)
return result
elif intvidx_self.closed != other.closed:
raise ValueError(
"can only do set operations between two IntervalIndex "
"objects that are closed on the same side"
)
# GH 19016: ensure set op will not return a prohibited dtype
subtypes = [intvidx_self.dtype.subtype, other.dtype.subtype]
common_subtype = find_common_type(subtypes)
if is_object_dtype(common_subtype):
raise TypeError(
f"can only do {self.op_name} between two IntervalIndex "
"objects that have compatible dtypes"
)
return setop(intvidx_self, other, sort)
return func
@Appender(
_interval_shared_docs["class"]
% dict(
klass="IntervalIndex",
summary="Immutable index of intervals that are closed on the same side.",
name=_index_doc_kwargs["name"],
versionadded="0.20.0",
extra_attributes="is_overlapping\nvalues\n",
extra_methods="",
examples=textwrap.dedent(
"""\
Examples
--------
A new ``IntervalIndex`` is typically constructed using
:func:`interval_range`:
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right',
dtype='interval[int64]')
It may also be constructed using one of the constructor
methods: :meth:`IntervalIndex.from_arrays`,
:meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.
See further examples in the doc strings of ``interval_range`` and the
mentioned constructor methods.
"""
),
)
)
@inherit_names(["set_closed", "to_tuples"], IntervalArray, wrap=True)
@inherit_names(
["__array__", "overlaps", "contains", "left", "right", "length"], IntervalArray,
)
@inherit_names(
["is_non_overlapping_monotonic", "mid", "closed"], IntervalArray, cache=True,
)
class IntervalIndex(IntervalMixin, ExtensionIndex):
_typ = "intervalindex"
_comparables = ["name"]
_attributes = ["name"]
# we would like our indexing holder to defer to us
_defer_to_indexing = True
# Immutable, so we are able to cache computations like isna in '_mask'
_mask = None
_data: IntervalArray
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data,
closed=None,
dtype=None,
copy: bool = False,
name=None,
verify_integrity: bool = True,
):
name = maybe_extract_name(name, data, cls)
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray(
data,
closed=closed,
copy=copy,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls._simple_new(array, name)
@classmethod
def _simple_new(cls, array: IntervalArray, name: Label = None):
"""
Construct from an IntervalArray
Parameters
----------
array : IntervalArray
name : Label, default None
Attached as result.name
"""
assert isinstance(array, IntervalArray), type(array)
result = IntervalMixin.__new__(cls)
result._data = array
result.name = name
result._cache = {}
result._no_setting_name = False
result._reset_identity()
return result
@classmethod
@Appender(
_interval_shared_docs["from_breaks"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_breaks(
cls, breaks, closed: str = "right", name=None, copy: bool = False, dtype=None
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_breaks(
breaks, closed=closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_arrays"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_arrays(
cls,
left,
right,
closed: str = "right",
name=None,
copy: bool = False,
dtype=None,
):
with rewrite_exception("IntervalArray", cls.__name__):
array = IntervalArray.from_arrays(
left, right, closed, copy=copy, dtype=dtype
)
return cls._simple_new(array, name=name)
@classmethod
@Appender(
_interval_shared_docs["from_tuples"]
% dict(
klass="IntervalIndex",
examples=textwrap.dedent(
"""\
Examples
--------
>>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])
IntervalIndex([(0, 1], (1, 2]],
closed='right',
dtype='interval[int64]')
"""
),
)
)
def from_tuples(
cls, data, closed: str = "right", name=None, copy: bool = False, dtype=None
):
with rewrite_exception("IntervalArray", cls.__name__):
arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype)
return cls._simple_new(arr, name=name)
# --------------------------------------------------------------------
@Appender(Index._shallow_copy.__doc__)
def _shallow_copy(self, values=None, name: Label = lib.no_default):
name = self.name if name is lib.no_default else name
cache = self._cache.copy() if values is None else {}
if values is None:
values = self._data
result = self._simple_new(values, name=name)
result._cache = cache
return result
@cache_readonly
def _isnan(self):
"""
Return a mask indicating if each value is NA.
"""
if self._mask is None:
self._mask = isna(self.left)
return self._mask
@cache_readonly
def _engine(self):
left = self._maybe_convert_i8(self.left)
right = self._maybe_convert_i8(self.right)
return IntervalTree(left, right, closed=self.closed)
def __contains__(self, key: Any) -> bool:
"""
return a boolean if this key is IN the index
We *only* accept an Interval
Parameters
----------
key : Interval
Returns
-------
bool
"""
hash(key)
if not isinstance(key, Interval):
return False
try:
self.get_loc(key)
return True
except KeyError:
return False
@cache_readonly
def _multiindex(self) -> MultiIndex:
return MultiIndex.from_arrays([self.left, self.right], names=["left", "right"])
@cache_readonly
def values(self) -> IntervalArray:
"""
Return the IntervalIndex's data as an IntervalArray.
"""
return self._data
@property
def _has_complex_internals(self) -> bool:
# used to avoid libreduction code paths, which raise or require conversion
return True
def __array_wrap__(self, result, context=None):
# we don't want the superclass implementation
return result
def __reduce__(self):
d = dict(left=self.left, right=self.right)
d.update(self._get_attributes_dict())
return _new_IntervalIndex, (type(self), d), None
@Appender(Index.astype.__doc__)
def astype(self, dtype, copy=True):
with rewrite_exception("IntervalArray", type(self).__name__):
new_values = self._values.astype(dtype, copy=copy)
if is_interval_dtype(new_values.dtype):
return self._shallow_copy(new_values)
return Index.astype(self, dtype, copy=copy)
@property
def inferred_type(self) -> str:
"""Return a string of the type inferred from the values"""
return "interval"
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep: bool = False) -> int:
# we don't use an explicit engine
# so return the bytes here
return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep)
# IntervalTree doesn't have a is_monotonic_decreasing, so have to override
# the Index implementation
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
"""
Return True if the IntervalIndex is monotonic decreasing (only equal or
decreasing values), else False
"""
return self[::-1].is_monotonic_increasing
@cache_readonly
def is_unique(self):
"""
Return True if the IntervalIndex contains unique elements, else False.
"""
left = self.left
right = self.right
if self.isna().sum() > 1:
return False
if left.is_unique or right.is_unique:
return True
seen_pairs = set()
check_idx = np.where(left.duplicated(keep=False))[0]
for idx in check_idx:
pair = (left[idx], right[idx])
if pair in seen_pairs:
return False
seen_pairs.add(pair)
return True
@property
def is_overlapping(self) -> bool:
"""
Return True if the IntervalIndex has overlapping intervals, else False.
Two intervals overlap if they share a common point, including closed
endpoints. Intervals that only have an open endpoint in common do not
overlap.
.. versionadded:: 0.24.0
Returns
-------
bool
Boolean indicating if the IntervalIndex has overlapping intervals.
See Also
--------
Interval.overlaps : Check whether two Interval objects overlap.
IntervalIndex.overlaps : Check an IntervalIndex elementwise for
overlaps.
Examples
--------
>>> index = pd.IntervalIndex.from_tuples([(0, 2), (1, 3), (4, 5)])
>>> index
IntervalIndex([(0, 2], (1, 3], (4, 5]],
closed='right',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that share closed endpoints overlap:
>>> index = pd.interval_range(0, 3, closed='both')
>>> index
IntervalIndex([[0, 1], [1, 2], [2, 3]],
closed='both',
dtype='interval[int64]')
>>> index.is_overlapping
True
Intervals that only have an open endpoint in common do not overlap:
>>> index = pd.interval_range(0, 3, closed='left')
>>> index
IntervalIndex([[0, 1), [1, 2), [2, 3)],
closed='left',
dtype='interval[int64]')
>>> index.is_overlapping
False
"""
# GH 23309
return self._engine.is_overlapping
def _should_fallback_to_positional(self) -> bool:
# integer lookups in Series.__getitem__ are unambiguously
# positional in this case
return self.dtype.subtype.kind in ["m", "M"]
def _maybe_cast_slice_bound(self, label, side, kind):
return getattr(self, side)._maybe_cast_slice_bound(label, side, kind)
@Appender(Index._convert_list_indexer.__doc__)
def _convert_list_indexer(self, keyarr):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
def _can_reindex(self, indexer: np.ndarray) -> None:
"""
Check if we are allowing reindexing with this particular indexer.
Parameters
----------
indexer : an integer indexer
Raises
------
ValueError if its a duplicate axis
"""
# trying to reindex on an axis with duplicates
if self.is_overlapping and len(indexer):
raise ValueError("cannot reindex from an overlapping axis")
def _needs_i8_conversion(self, key) -> bool:
"""
Check if a given key needs i8 conversion. Conversion is necessary for
Timestamp, Timedelta, DatetimeIndex, and TimedeltaIndex keys. An
        Interval-like requires conversion if its endpoints are one of the
aforementioned types.
Assumes that any list-like data has already been cast to an Index.
Parameters
----------
key : scalar or Index-like
The key that should be checked for i8 conversion
Returns
-------
bool
"""
if is_interval_dtype(key) or isinstance(key, Interval):
return self._needs_i8_conversion(key.left)
i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex)
return isinstance(key, i8_types)
def _maybe_convert_i8(self, key):
"""
        Maybe convert a given key to its equivalent i8 value(s). Used as a
preprocessing step prior to IntervalTree queries (self._engine), which
expects numeric data.
Parameters
----------
key : scalar or list-like
The key that should maybe be converted to i8.
Returns
-------
scalar or list-like
The original key if no conversion occurred, int if converted scalar,
Int64Index if converted list-like.
"""
original = key
if is_list_like(key):
key = ensure_index(key)
if not self._needs_i8_conversion(key):
return original
scalar = is_scalar(key)
if is_interval_dtype(key) or isinstance(key, Interval):
# convert left/right and reconstruct
left = self._maybe_convert_i8(key.left)
right = self._maybe_convert_i8(key.right)
constructor = Interval if scalar else IntervalIndex.from_arrays
return constructor(left, right, closed=self.closed)
if scalar:
# Timestamp/Timedelta
key_dtype, key_i8 = infer_dtype_from_scalar(key, pandas_dtype=True)
else:
# DatetimeIndex/TimedeltaIndex
key_dtype, key_i8 = key.dtype, Index(key.asi8)
if key.hasnans:
                # convert NaT from its i8 value to np.nan so it's not viewed
# as a valid value, maybe causing errors (e.g. is_overlapping)
key_i8 = key_i8.where(~key._isnan)
# ensure consistency with IntervalIndex subtype
subtype = self.dtype.subtype
if not is_dtype_equal(subtype, key_dtype):
raise ValueError(
f"Cannot index an IntervalIndex of subtype {subtype} with "
f"values of dtype {key_dtype}"
)
return key_i8
def _check_method(self, method):
if method is None:
return
if method in ["bfill", "backfill", "pad", "ffill", "nearest"]:
raise NotImplementedError(
f"method {method} not yet implemented for IntervalIndex"
)
raise ValueError("Invalid fill method")
def _searchsorted_monotonic(self, label, side, exclude_label=False):
if not self.is_non_overlapping_monotonic:
raise KeyError(
"can only get slices from an IntervalIndex if bounds are "
"non-overlapping and all monotonic increasing or decreasing"
)
if isinstance(label, IntervalMixin):
raise NotImplementedError("Interval objects are not currently supported")
# GH 20921: "not is_monotonic_increasing" for the second condition
# instead of "is_monotonic_decreasing" to account for single element
# indexes being both increasing and decreasing
if (side == "left" and self.left.is_monotonic_increasing) or (
side == "right" and not self.left.is_monotonic_increasing
):
sub_idx = self.right
if self.open_right or exclude_label:
label = _get_next_label(label)
else:
sub_idx = self.left
if self.open_left or exclude_label:
label = _get_prev_label(label)
return sub_idx._searchsorted_monotonic(label, side)
def get_loc(
self, key, method: Optional[str] = None, tolerance=None
) -> Union[int, slice, np.ndarray]:
"""
Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None}, optional
* default: matches where the label is within an interval only.
Returns
-------
int if unique index, slice if monotonic index, else mask
Examples
--------
>>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2)
>>> index = pd.IntervalIndex([i1, i2])
>>> index.get_loc(1)
0
You can also supply a point inside an interval.
>>> index.get_loc(1.5)
1
If a label is in several intervals, you get the locations of all the
relevant intervals.
>>> i3 = pd.Interval(0, 2)
>>> overlapping_index = pd.IntervalIndex([i1, i2, i3])
>>> overlapping_index.get_loc(0.5)
array([ True, False, True])
Only exact matches will be returned if an interval is provided.
>>> index.get_loc(pd.Interval(0, 1))
0
"""
self._check_method(method)
if not is_scalar(key):
raise InvalidIndexError(key)
if isinstance(key, Interval):
if self.closed != key.closed:
raise KeyError(key)
mask = (self.left == key.left) & (self.right == key.right)
else:
# assume scalar
op_left = le if self.closed_left else lt
op_right = le if self.closed_right else lt
try:
mask = op_left(self.left, key) & op_right(key, self.right)
except TypeError as err:
# scalar is not comparable to II subtype --> invalid label
raise KeyError(key) from err
matches = mask.sum()
if matches == 0:
raise KeyError(key)
elif matches == 1:
return mask.argmax()
return lib.maybe_booleans_to_slice(mask.view("u1"))
@Substitution(
**dict(
_index_doc_kwargs,
**{
"raises_section": textwrap.dedent(
"""
Raises
------
NotImplementedError
If any method argument other than the default of
None is specified as these are not yet implemented.
"""
)
},
)
)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(
self,
target: AnyArrayLike,
method: Optional[str] = None,
limit: Optional[int] = None,
tolerance: Optional[Any] = None,
) -> np.ndarray:
self._check_method(method)
if self.is_overlapping:
raise InvalidIndexError(
"cannot handle overlapping indices; "
"use IntervalIndex.get_indexer_non_unique"
)
target_as_index = ensure_index(target)
if isinstance(target_as_index, IntervalIndex):
# equal indexes -> 1:1 positional match
if self.equals(target_as_index):
return np.arange(len(self), dtype="intp")
# different closed or incompatible subtype -> no matches
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
return np.repeat(np.intp(-1), len(target_as_index))
# non-overlapping -> at most one match per interval in target_as_index
# want exact matches -> need both left/right to match, so defer to
# left/right get_indexer, compare elementwise, equality -> match
left_indexer = self.left.get_indexer(target_as_index.left)
right_indexer = self.right.get_indexer(target_as_index.right)
indexer = np.where(left_indexer == right_indexer, left_indexer, -1)
elif is_categorical_dtype(target_as_index.dtype):
# get an indexer for unique categories then propagate to codes via take_1d
categories_indexer = self.get_indexer(target_as_index.categories)
indexer = take_1d(categories_indexer, target_as_index.codes, fill_value=-1)
elif not is_object_dtype(target_as_index):
# homogeneous scalar index: use IntervalTree
target_as_index = self._maybe_convert_i8(target_as_index)
indexer = self._engine.get_indexer(target_as_index.values)
else:
# heterogeneous scalar index: defer elementwise to get_loc
            # (non-overlapping so get_loc guarantees scalar or KeyError)
indexer = []
for key in target_as_index:
try:
loc = self.get_loc(key)
except KeyError:
loc = -1
except InvalidIndexError as err:
# i.e. non-scalar key
raise TypeError(key) from err
indexer.append(loc)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(
self, target: AnyArrayLike
) -> Tuple[np.ndarray, np.ndarray]:
target_as_index = ensure_index(target)
# check that target_as_index IntervalIndex is compatible
if isinstance(target_as_index, IntervalIndex):
common_subtype = find_common_type(
[self.dtype.subtype, target_as_index.dtype.subtype]
)
if self.closed != target_as_index.closed or is_object_dtype(common_subtype):
# different closed or incompatible subtype -> no matches
return (
np.repeat(-1, len(target_as_index)),
np.arange(len(target_as_index)),
)
if is_object_dtype(target_as_index) or isinstance(
target_as_index, IntervalIndex
):
# target_as_index might contain intervals: defer elementwise to get_loc
indexer, missing = [], []
for i, key in enumerate(target_as_index):
try:
locs = self.get_loc(key)
if isinstance(locs, slice):
locs = np.arange(locs.start, locs.stop, locs.step, dtype="intp")
locs = np.array(locs, ndmin=1)
except KeyError:
missing.append(i)
locs = np.array([-1])
indexer.append(locs)
indexer = np.concatenate(indexer)
else:
target_as_index = self._maybe_convert_i8(target_as_index)
indexer, missing = self._engine.get_indexer_non_unique(
target_as_index.values
)
return ensure_platform_int(indexer), ensure_platform_int(missing)
def get_indexer_for(self, target: AnyArrayLike, **kwargs) -> np.ndarray:
"""
Guaranteed return of an indexer even when overlapping.
This dispatches to get_indexer or get_indexer_non_unique
as appropriate.
Returns
-------
numpy.ndarray
List of indices.
"""
if self.is_overlapping:
return self.get_indexer_non_unique(target)[0]
return self.get_indexer(target, **kwargs)
def _convert_slice_indexer(self, key: slice, kind: str):
if not (key.step is None or key.step == 1):
# GH#31658 if label-based, we require step == 1,
# if positional, we disallow float start/stop
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
if kind == "loc":
raise ValueError(msg)
elif kind == "getitem":
if not is_valid_positional_slice(key):
# i.e. this cannot be interpreted as a positional slice
raise ValueError(msg)
return super()._convert_slice_indexer(key, kind)
@Appender(Index.where.__doc__)
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self._values, other)
result = IntervalArray(values)
return self._shallow_copy(result)
def delete(self, loc):
"""
Return a new IntervalIndex with passed location(-s) deleted
Returns
-------
IntervalIndex
"""
new_left = self.left.delete(loc)
new_right = self.right.delete(loc)
result = self._data._shallow_copy(new_left, new_right)
return self._shallow_copy(result)
def insert(self, loc, item):
"""
Return a new IntervalIndex inserting new item at location. Follows
Python list.append semantics for negative values. Only Interval
objects and NA can be inserted into an IntervalIndex
Parameters
----------
loc : int
item : object
Returns
-------
IntervalIndex
"""
if isinstance(item, Interval):
if item.closed != self.closed:
raise ValueError(
"inserted item must be closed on the same side as the index"
)
left_insert = item.left
right_insert = item.right
elif is_scalar(item) and isna(item):
# GH 18295
left_insert = right_insert = item
else:
raise ValueError(
"can only insert Interval objects and NA into an IntervalIndex"
)
new_left = self.left.insert(loc, left_insert)
new_right = self.right.insert(loc, right_insert)
result = self._data._shallow_copy(new_left, new_right)
return self._shallow_copy(result)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
result = self._data.take(
indices, axis=axis, allow_fill=allow_fill, fill_value=fill_value, **kwargs
)
return self._shallow_copy(result)
# --------------------------------------------------------------------
# Rendering Methods
# __repr__ associated methods are based on MultiIndex
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
def _format_native_types(self, na_rep="NaN", quoting=None, **kwargs):
# GH 28210: use base method but with different default na_rep
return super()._format_native_types(na_rep=na_rep, quoting=quoting, **kwargs)
def _format_data(self, name=None):
# TODO: integrate with categorical and make generic
# name argument is unused here; just for compat with base / categorical
n = len(self)
max_seq_items = min((get_option("display.max_seq_items") or n) // 10, 10)
formatter = str
if n == 0:
summary = "[]"
elif n == 1:
first = formatter(self[0])
summary = f"[{first}]"
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = f"[{first}, {last}]"
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
head_joined = ", ".join(head)
tail_joined = ", ".join(tail)
summary = f"[{head_joined} ... {tail_joined}]"
else:
tail = [formatter(x) for x in self]
joined = ", ".join(tail)
summary = f"[{joined}]"
return summary + "," + self._format_space()
def _format_attrs(self):
attrs = [("closed", repr(self.closed))]
if self.name is not None:
attrs.append(("name", default_pprint(self.name)))
attrs.append(("dtype", f"'{self.dtype}'"))
return attrs
def _format_space(self) -> str:
space = " " * (len(type(self).__name__) + 1)
return f"\n{space}"
# --------------------------------------------------------------------
def argsort(self, *args, **kwargs) -> np.ndarray:
return np.lexsort((self.right, self.left))
def equals(self, other) -> bool:
"""
Determines if two IntervalIndex objects contain the same elements.
"""
if self.is_(other):
return True
# if we can coerce to an II
# then we can compare
if not isinstance(other, IntervalIndex):
if not is_interval_dtype(other):
return False
other = Index(other)
return (
self.left.equals(other.left)
and self.right.equals(other.right)
and self.closed == other.closed
)
@Appender(Index.intersection.__doc__)
@SetopCheck(op_name="intersection")
def intersection(
self, other: "IntervalIndex", sort: bool = False
) -> "IntervalIndex":
if self.left.is_unique and self.right.is_unique:
taken = self._intersection_unique(other)
elif other.left.is_unique and other.right.is_unique and self.isna().sum() <= 1:
# Swap other/self if other is unique and self does not have
# multiple NaNs
taken = other._intersection_unique(self)
else:
# duplicates
taken = self._intersection_non_unique(other)
if sort is None:
taken = taken.sort_values()
return taken
def _intersection_unique(self, other: "IntervalIndex") -> "IntervalIndex":
"""
Used when the IntervalIndex does not have any common endpoint,
        no matter left or right.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
IntervalIndex
"""
lindexer = self.left.get_indexer(other.left)
rindexer = self.right.get_indexer(other.right)
match = (lindexer == rindexer) & (lindexer != -1)
indexer = lindexer.take(match.nonzero()[0])
return self.take(indexer)
def _intersection_non_unique(self, other: "IntervalIndex") -> "IntervalIndex":
"""
Used when the IntervalIndex does have some common endpoints,
        on either side.
Return the intersection with another IntervalIndex.
Parameters
----------
other : IntervalIndex
Returns
-------
IntervalIndex
"""
mask = np.zeros(len(self), dtype=bool)
if self.hasnans and other.hasnans:
first_nan_loc = np.arange(len(self))[self.isna()][0]
mask[first_nan_loc] = True
other_tups = set(zip(other.left, other.right))
for i, tup in enumerate(zip(self.left, self.right)):
if tup in other_tups:
mask[i] = True
return self[mask]
def _setop(op_name: str, sort=None):
@SetopCheck(op_name=op_name)
def func(self, other, sort=sort):
result = getattr(self._multiindex, op_name)(other._multiindex, sort=sort)
result_name = get_op_result_name(self, other)
# GH 19101: ensure empty results have correct dtype
if result.empty:
result = result._values.astype(self.dtype.subtype)
else:
result = result._values
return type(self).from_tuples(result, closed=self.closed, name=result_name)
return func
@property
def is_all_dates(self) -> bool:
"""
This is False even when left/right contain datetime-like objects,
as the check is done on the Interval itself
"""
return False
union = _setop("union")
difference = _setop("difference")
symmetric_difference = _setop("symmetric_difference")
# TODO: arithmetic operations
# GH#30817 until IntervalArray implements inequalities, get them from Index
def __lt__(self, other):
return Index.__lt__(self, other)
def __le__(self, other):
return Index.__le__(self, other)
def __gt__(self, other):
return Index.__gt__(self, other)
def __ge__(self, other):
return Index.__ge__(self, other)
IntervalIndex._add_logical_methods_disabled()
def _is_valid_endpoint(endpoint) -> bool:
"""
Helper for interval_range to check if start/end are valid types.
"""
return any(
[
is_number(endpoint),
isinstance(endpoint, Timestamp),
isinstance(endpoint, Timedelta),
endpoint is None,
]
)
def _is_type_compatible(a, b) -> bool:
"""
Helper for interval_range to check type compat of start/end/freq.
"""
is_ts_compat = lambda x: isinstance(x, (Timestamp, DateOffset))
is_td_compat = lambda x: isinstance(x, (Timedelta, DateOffset))
return (
(is_number(a) and is_number(b))
or (is_ts_compat(a) and is_ts_compat(b))
or (is_td_compat(a) and is_td_compat(b))
or com.any_none(a, b)
)
def interval_range(
start=None, end=None, periods=None, freq=None, name=None, closed="right"
):
"""
Return a fixed frequency IntervalIndex.
Parameters
----------
start : numeric or datetime-like, default None
Left bound for generating intervals.
end : numeric or datetime-like, default None
Right bound for generating intervals.
periods : int, default None
Number of periods to generate.
freq : numeric, str, or DateOffset, default None
The length of each interval. Must be consistent with the type of start
and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1
for numeric and 'D' for datetime-like.
name : str, default None
Name of the resulting IntervalIndex.
closed : {'left', 'right', 'both', 'neither'}, default 'right'
Whether the intervals are closed on the left-side, right-side, both
or neither.
Returns
-------
IntervalIndex
See Also
--------
IntervalIndex : An Index of intervals that are all closed on the same side.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``IntervalIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end``, inclusively.
To learn more about datetime-like frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Numeric ``start`` and ``end`` is supported.
>>> pd.interval_range(start=0, end=5)
IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],
closed='right', dtype='interval[int64]')
Additionally, datetime-like input is also supported.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... end=pd.Timestamp('2017-01-04'))
IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03],
(2017-01-03, 2017-01-04]],
closed='right', dtype='interval[datetime64[ns]]')
    The ``freq`` parameter specifies the frequency between the left and right
endpoints of the individual intervals within the ``IntervalIndex``. For
numeric ``start`` and ``end``, the frequency must also be numeric.
>>> pd.interval_range(start=0, periods=4, freq=1.5)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right', dtype='interval[float64]')
Similarly, for datetime-like ``start`` and ``end``, the frequency must be
convertible to a DateOffset.
>>> pd.interval_range(start=pd.Timestamp('2017-01-01'),
... periods=3, freq='MS')
IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01],
(2017-03-01, 2017-04-01]],
closed='right', dtype='interval[datetime64[ns]]')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.interval_range(start=0, end=6, periods=4)
IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]],
closed='right',
dtype='interval[float64]')
The ``closed`` parameter specifies which endpoints of the individual
intervals within the ``IntervalIndex`` are closed.
>>> pd.interval_range(end=5, periods=4, closed='both')
IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]],
closed='both', dtype='interval[int64]')
"""
start = com.maybe_box_datetimelike(start)
end = com.maybe_box_datetimelike(end)
endpoint = start if start is not None else end
if freq is None and com.any_none(periods, start, end):
freq = 1 if is_number(endpoint) else "D"
if com.count_not_none(start, end, periods, freq) != 3:
raise ValueError(
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
if not _is_valid_endpoint(start):
raise ValueError(f"start must be numeric or datetime-like, got {start}")
elif not _is_valid_endpoint(end):
raise ValueError(f"end must be numeric or datetime-like, got {end}")
if is_float(periods):
periods = int(periods)
elif not is_integer(periods) and periods is not None:
raise TypeError(f"periods must be a number, got {periods}")
if freq is not None and not is_number(freq):
try:
freq = to_offset(freq)
except ValueError as err:
raise ValueError(
f"freq must be numeric or convertible to DateOffset, got {freq}"
) from err
# verify type compatibility
if not all(
[
_is_type_compatible(start, end),
_is_type_compatible(start, freq),
_is_type_compatible(end, freq),
]
):
raise TypeError("start, end, freq need to be type compatible")
# +1 to convert interval count to breaks count (n breaks = n-1 intervals)
if periods is not None:
periods += 1
if is_number(endpoint):
# force consistency between start/end/freq (lower end if freq skips it)
if com.all_not_none(start, end, freq):
end -= (end - start) % freq
# compute the period/start/end if unspecified (at most one)
if periods is None:
periods = int((end - start) // freq) + 1
elif start is None:
start = end - (periods - 1) * freq
elif end is None:
end = start + (periods - 1) * freq
breaks = np.linspace(start, end, periods)
if all(is_integer(x) for x in com.not_none(start, end, freq)):
# np.linspace always produces float output
breaks = maybe_downcast_to_dtype(breaks, "int64")
else:
# delegate to the appropriate range function
if isinstance(endpoint, Timestamp):
range_func = date_range
else:
range_func = timedelta_range
breaks = range_func(start=start, end=end, periods=periods, freq=freq)
return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
|
py
|
1a59e0f88b4c5acd055caeaef4af47b04e99162d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File : verify_img_generator
@Author : alanzhchou
@Create : 19/7/12-1:23
@Python : 3.6.2
@Version : 1.0
@Email : [email protected]
@Copyright : MIT License - Copyright (c) 2019 alanzhchou
@description: generate images of digits or characters for web verification codes
@Change log :
19/7/12-1:23 created
"""
|
py
|
1a59e33cdc48e93913b5feb15e1063b142fc024b
|
"""add discovery
Revision ID: 5a05464c07ae
Revises: c4a0292785e6
Create Date: 2017-09-06 21:55:21.193584
"""
# revision identifiers, used by Alembic.
revision = "5a05464c07ae"
down_revision = "c4a0292785e6"
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column("elements", sa.Column("discoverers", sa.String))
op.add_column("elements", sa.Column("discovery_year", sa.Integer))
op.add_column("elements", sa.Column("discovery_location", sa.String))
def downgrade():
with op.batch_alter_table("elements") as batch_op:
batch_op.drop_column("discoverers")
batch_op.drop_column("discovery_year")
batch_op.drop_column("discovery_location")
|
py
|
1a59e39eb98ed7e4b765e9ae0d53ece3ffb92f83
|
# PySpice example code
import matplotlib.pyplot as plt
import PySpice.Logging.Logging as Logging
from PySpice.Doc.ExampleTools import find_libraries
from PySpice.Probe.Plot import plot
from PySpice.Spice.Library import SpiceLibrary
from PySpice.Spice.Netlist import Circuit
from PySpice.Unit import *
logger = Logging.setup_logging()
libraries_path = find_libraries()
spice_library = SpiceLibrary(libraries_path)
circuit = Circuit('NMOS Transistor')
circuit.include(spice_library['ptm65nm_nmos'])
# Define the DC supply voltage value
Vdd = 1.1
# Instantiate circuit elements
Vgate = circuit.V('gate', 'gatenode', circuit.gnd, 0@u_V)
Vdrain = circuit.V('drain', 'vdd', circuit.gnd, u_V(Vdd))
# M <name> <drain node> <gate node> <source node> <bulk/substrate node>
circuit.MOSFET(1, 'vdd', 'gatenode', circuit.gnd, circuit.gnd, model='ptm65nm_nmos')
#r# We plot the characteristics :math:`Id = f(Vgs)` using a DC sweep simulation.
simulator = circuit.simulator(temperature=25, nominal_temperature=25)
analysis = simulator.dc(Vgate=slice(0, Vdd, .01))
figure, ax = plt.subplots(figsize=(20, 10))
ax.plot(analysis['gatenode'], u_mA(-analysis.Vdrain))
ax.legend(['NMOS characteristic'])
ax.grid()
ax.set_xlabel('Vgs [V]')
ax.set_ylabel('Id [mA]')
plt.tight_layout()
plt.show()
#f# save_figure('figure', 'transistor-nmos-plot.png')
|
py
|
1a59e404ea306f10267570385de32da5acc3a67c
|
#-*- coding: utf-8 -*-
from .scope import ScopeTestCase
from .json import JSONTestCase
from .mac import MACTestCase
from .bearer import BearerTestCase
from .responsetype import ResponseTypeTestCase
from .granttype import GrantTypeTestCase
from .config import ConfigTestCase
|
py
|
1a59e420d81d319886f11fbe10d53931f05662b6
|
from __future__ import annotations
from typing import Type
import imm
import nmm
from ._cdata import CData
from ._ffi import ffi, lib
from .metadata import Metadata
__all__ = ["DCPProfile"]
class DCPProfile:
def __init__(self, dcp_profile: CData, profile: nmm.Profile, metadata: Metadata):
self._dcp_profile = dcp_profile
if self._dcp_profile == ffi.NULL:
raise RuntimeError("`dcp_profile` is NULL.")
self._profile = profile
self._metadata = metadata
@property
def dcp_profile(self) -> CData:
return self._dcp_profile
@classmethod
def create(
cls: Type[DCPProfile], alphabet: nmm.BaseAlphabet, metadata: Metadata
) -> DCPProfile:
dcp_profile = lib.dcp_profile_create(alphabet.imm_abc, metadata.dcp_metadata)
prof = nmm.wrap.nmm_profile(lib.dcp_profile_nmm_profile(dcp_profile), alphabet)
return cls(dcp_profile, prof, metadata)
def append_model(self, model: imm.Model):
self._profile.append_model(model)
@property
def alphabet(self):
return self._profile.alphabet
@property
def metadata(self) -> Metadata:
return self._metadata
@property
def models(self):
return self._profile.models
@property
def profid(self) -> int:
return lib.dcp_profile_id(self._dcp_profile)
def __del__(self):
if self._dcp_profile != ffi.NULL:
lib.dcp_profile_free(self._dcp_profile)
|
py
|
1a59e48b5e22702b3116302460e1883c8f842010
|
import hashlib
import itertools
import re
import sys
import warnings
from collections import defaultdict
from importlib import import_module
from types import ModuleType
from content_editor.models import Type
from django.conf import settings
from django.core.checks import Warning
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q, signals
from django.urls import NoReverseMatch, include, re_path, reverse
from django.utils.translation import get_language, gettext_lazy as _
from feincms3.mixins import ChoicesCharField
__all__ = (
"PageTypeMixin",
"TemplateType",
"ApplicationType",
"apps_middleware",
"apps_urlconf",
"page_for_app_request",
"reverse_any",
"reverse_app",
"reverse_fallback",
)
_APPS_MODEL = None
def reverse_any(viewnames, urlconf=None, args=None, kwargs=None, *fargs, **fkwargs):
"""
Tries reversing a list of viewnames with the same arguments, and returns
the first result where no ``NoReverseMatch`` exception is raised.
Usage::
url = reverse_any(
("blog:article-detail", "articles:article-detail"),
kwargs={"slug": "article-slug"},
)
"""
for viewname in viewnames:
try:
return reverse(viewname, urlconf, args, kwargs, *fargs, **fkwargs)
except NoReverseMatch:
pass
raise NoReverseMatch(
"Reverse for any of '%s' with arguments '%s' and keyword arguments"
" '%s' not found." % ("', '".join(viewnames), args or [], kwargs or {})
)
def reverse_app(namespaces, viewname, *args, languages=None, **kwargs):
"""
Reverse app URLs, preferring the active language.
``reverse_app`` first generates a list of viewnames and passes them on
to ``reverse_any``.
Assuming that we're trying to reverse the URL of an article detail view,
that the project is configured with german, english and french as available
languages, french as active language and that the current article is a
publication, the viewnames are:
- ``apps-fr.publications.article-detail``
- ``apps-fr.articles.article-detail``
- ``apps-de.publications.article-detail``
- ``apps-de.articles.article-detail``
- ``apps-en.publications.article-detail``
- ``apps-en.articles.article-detail``
``reverse_app`` tries harder to return a URL in the correct language than
to return a URL for the correct instance namespace.
Example::
url = reverse_app(
("category-1", "blog"),
"post-detail",
kwargs={"year": 2016, "slug": "my-cat"},
)
"""
if languages is None:
current = get_language()
languages = sorted(
(row[0] for row in settings.LANGUAGES), key=lambda lang: lang != current
)
viewnames = [
":".join(r)
for r in itertools.product(
(
f"{_APPS_MODEL.LANGUAGE_CODES_NAMESPACE}-{language}"
for language in languages
),
(namespaces if isinstance(namespaces, (list, tuple)) else (namespaces,)),
(viewname,),
)
]
return reverse_any(viewnames, *args, **kwargs)
def reverse_fallback(fallback, fn, *args, **kwargs):
"""
Returns the result of ``fn(*args, **kwargs)``, or ``fallback`` if the
former raises a ``NoReverseMatch`` exception. This is especially useful when
reversing app URLs from outside the app, where you do not want crashes if the
app isn't available anywhere.
The following two examples are equivalent, choose whichever you like best::
reverse_fallback(
"/",
lambda: reverse_app(
("articles",),
"article-detail",
kwargs={"slug": self.slug},
),
)
reverse_fallback(
"/",
reverse_app,
("articles",),
"article-detail",
kwargs={"slug": self.slug},
)
"""
try:
return fn(*args, **kwargs)
except NoReverseMatch:
return fallback
def apps_urlconf(*, apps=None):
"""
Generates a dynamic URLconf Python module including all application page
types in their assigned place and adding the ``urlpatterns`` from
``ROOT_URLCONF`` at the end. Returns the value of ``ROOT_URLCONF`` directly
if there are no active application page types.
Since Django uses an LRU cache for URL resolvers, we try hard to only
generate a changed URLconf when application URLs actually change.
The application URLconfs are put in nested namespaces:
- The outer application namespace is ``apps`` by default. This value can be
overridden by setting the ``LANGUAGE_CODES_NAMESPACE`` class attribute of
the page class to a different value. The instance namespaces consist of
the ``LANGUAGE_CODES_NAMESPACE`` value with a language added at the end.
As long as you're always using ``reverse_app`` you do not have to know
the specifics.
- The inner namespace is the app namespace, where the application
namespace is defined by the app itself (assign ``app_name`` in the
same module as ``urlpatterns``) and the instance namespace is defined
by the application name (from ``TYPES``).
Modules stay around as long as the Python (most of the time WSGI) process
lives. Unloading modules is tricky and probably not worth it since the
URLconf modules shouldn't gobble up much memory.
The set of applications can be overridden by passing a list of
``(path, page_type, app_namespace, language_code)`` tuples.
"""
if apps is None:
fields = ("path", "page_type", "app_namespace", "language_code")
apps = (
_APPS_MODEL._default_manager.active()
.with_tree_fields(False)
.exclude(app_namespace="")
.values_list(*fields)
.order_by(*fields)
)
if not apps:
# No point wrapping ROOT_URLCONF if there are no additional URLs
return settings.ROOT_URLCONF
key = ",".join(itertools.chain.from_iterable(apps))
module_name = "urlconf_%s" % hashlib.md5(key.encode("utf-8")).hexdigest()
if module_name not in sys.modules:
types = {app.key: app for app in _APPS_MODEL.TYPES if app.get("urlconf")}
m = ModuleType(module_name)
mapping = defaultdict(list)
for path, page_type, app_namespace, language_code in apps:
if page_type not in types:
continue
mapping[language_code].append(
re_path(
r"^%s" % re.escape(path.lstrip("/")),
include(types[page_type]["urlconf"], namespace=app_namespace),
)
)
m.urlpatterns = [
re_path(
r"",
include(
(instances, _APPS_MODEL.LANGUAGE_CODES_NAMESPACE),
namespace="%s-%s"
% (_APPS_MODEL.LANGUAGE_CODES_NAMESPACE, language_code),
),
)
for language_code, instances in mapping.items()
]
# Append patterns from ROOT_URLCONF instead of including them because
# i18n_patterns only work in the root URLconf.
urlconf = import_module(settings.ROOT_URLCONF)
m.urlpatterns += urlconf.urlpatterns
for attribute in ["handler400", "handler403", "handler404", "handler500"]:
if hasattr(urlconf, attribute):
setattr(m, attribute, getattr(urlconf, attribute))
sys.modules[module_name] = m
return module_name
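# Illustrative sketch (not part of the original module): the ``apps`` override takes
# (path, page_type, app_namespace, language_code) tuples, so a urlconf can be built
# without touching the database, e.g.
#
#   module_name = apps_urlconf(apps=[("/blog/", "blog", "blog", "en")])
#
# The tuple values above are made-up placeholders following the docstring.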
def page_for_app_request(request, *, queryset=None):
"""
Returns the current page if we're inside an app. Should only be called
while processing app views. Will pass along exceptions caused by
non-existing or duplicated apps (this should never happen inside an app
because :func:`~feincms3.applications.apps_urlconf` wouldn't have added the app
in the first place if a matching page did not exist, but still.)
Example::
def article_detail(request, slug):
page = page_for_app_request(request)
page.activate_language(request)
article = get_object_or_404(Article, slug=slug)
return render(
request,
"articles/article_detail.html",
{"article": article, "page": page},
)
It is possible to override the queryset used to fetch a page instance. The
default implementation simply uses the first concrete subclass of
:class:`~feincms3.applications.PageTypeMixin`.
"""
if queryset is None:
queryset = _APPS_MODEL._default_manager.active().with_tree_fields()
# Unguarded - if this fails, we shouldn't even be here.
return queryset.get(
language_code=request.resolver_match.namespaces[0][
len(_APPS_MODEL.LANGUAGE_CODES_NAMESPACE) + 1 :
],
app_namespace=request.resolver_match.namespaces[1],
)
def apps_middleware(get_response):
"""
This middleware must be put in ``MIDDLEWARE``; it simply assigns
the return value of :func:`~feincms3.applications.apps_urlconf` to
``request.urlconf``. This middleware should probably be one of the first
since it has to run before any resolving happens.
"""
def middleware(request):
request.urlconf = apps_urlconf()
return get_response(request)
return middleware
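# Illustrative settings sketch (assumed dotted path feincms3.applications.apps_middleware):
#
#   MIDDLEWARE = [
#       "feincms3.applications.apps_middleware",
#       # ... the rest of the middleware stack ...
#   ]
#
# placed early so request.urlconf is set before any URL resolving happens, as described above.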
class TemplateType(Type):
_REQUIRED = {"key", "title", "template_name", "regions", "app_namespace"}
def __init__(self, **kwargs):
kwargs.setdefault("app_namespace", lambda instance: "")
super().__init__(**kwargs)
class ApplicationType(Type):
_REQUIRED = {"key", "title", "urlconf", "app_namespace"}
def __init__(self, **kwargs):
kwargs.setdefault("template_name", "")
kwargs.setdefault("regions", [])
kwargs.setdefault("app_namespace", lambda instance: instance.page_type)
super().__init__(**kwargs)
class PageTypeMixin(models.Model):
"""
The page class should inherit this mixin. It adds a ``page_type`` field
containing the selected page type, and an ``app_namespace`` field which
contains the instance namespace of the application, if the type of the page
is an application type. The field is empty e.g. for template page types.
Note that currently the :class:`~feincms3.mixins.LanguageMixin` is a
required dependency of :mod:`feincms3.applications`.
``TYPES`` contains a list of page type instances, either
:class:`~feincms3.applications.TemplateType` or
:class:`~feincms3.applications.ApplicationType` and maybe others in the
future. The configuration values are specific to each type, common to all
of them are a key (stored in the ``page_type`` field) and a user-visible
title.
Template types additionally require a ``template_name`` and a ``regions``
value.
Application types require a ``urlconf`` value and support the following
options:
- ``urlconf``: The path to the URLconf module for the application. Besides
the ``urlpatterns`` list the module should probably also specify a
``app_name``.
- ``required_fields``: A list of page class fields which must be non-empty
for the application to work. The values are checked in
``PageTypeMixin.clean_fields``.
- ``app_namespace``: A callable which receives the page instance
as its only argument and returns a string suitable for use as an
instance namespace.
Usage::
from content_editor.models import Region
from django.utils.translation import gettext_lazy as _
from feincms3.applications import PageTypeMixin
from feincms3.mixins import LanguageMixin
from feincms3.pages import AbstractPage
class Page(AbstractPage, PageTypeMixin, LanguageMixin):
TYPES = [
# It is recommended to always put a TemplateType type first
# because it will be the default type:
TemplateType(
key="standard",
title=_("Standard"),
template_name="pages/standard.html",
regions=[Region(key="main", title=_("Main"))],
),
ApplicationType(
key="publications",
title=_("publications"),
urlconf="app.articles.urls",
),
ApplicationType(
key="blog",
title=_("blog"),
urlconf="app.articles.urls",
),
ApplicationType(
key="contact",
title=_("contact form"),
urlconf="app.forms.contact_urls",
),
ApplicationType(
key="teams",
title=_("teams"),
urlconf="app.teams.urls",
app_namespace=lambda page: f"{page.page_type}-{page.team_id}",
required_fields=["team"],
),
]
"""
#: Override this to set a different name for the outer namespace.
LANGUAGE_CODES_NAMESPACE = "apps"
page_type = ChoicesCharField(_("page type"), max_length=100)
app_namespace = models.CharField(
("app instance namespace"), max_length=100, blank=True, editable=False
)
class Meta:
abstract = True
@property
def type(self):
"""
Returns the appropriate page type instance, either the selected type or
the first type in the list of ``TYPES`` if no type is selected or if
the type does not exist anymore.
"""
return self.TYPES_DICT.get(self.page_type, self.TYPES[0])
@property
def regions(self):
return self.type.regions
def save(self, *args, **kwargs):
"""
Updates ``app_namespace``.
"""
self.app_namespace = self.type.app_namespace(self)
super().save(*args, **kwargs)
save.alters_data = True
def clean_fields(self, exclude=None):
"""
Checks that required fields are given and that an app namespace only
exists once per site and language.
"""
exclude = [] if exclude is None else exclude
super().clean_fields(exclude)
type = self.type
if type and type.get("required_fields"):
missing = [
field for field in type["required_fields"] if not getattr(self, field)
]
if missing:
error = _('This field is required for the page type "%s".') % (
self.get_page_type_display(),
)
errors = {}
for field in missing:
if field in exclude:
errors.setdefault("__all__", []).append(f"{field}: {error}")
else:
errors[field] = error
raise ValidationError(errors)
if type and type.app_namespace(self):
if self.__class__._default_manager.filter(
Q(app_namespace=type.app_namespace(self)),
Q(language_code=self.language_code),
~Q(pk=self.pk),
).exists():
fields = ["__all__", "page_type"]
fields.extend(type.get("required_fields", ()))
raise ValidationError(
{
field: _("This exact app already exists.")
for field in fields
if field not in exclude
}
)
@staticmethod
def fill_page_type_choices(sender, **kwargs):
"""
Fills in the choices for ``page_type`` from the ``TYPES``
class variable. This method is a receiver of Django's
``class_prepared`` signal.
"""
if issubclass(sender, PageTypeMixin) and not sender._meta.abstract:
field = sender._meta.get_field("page_type")
field.choices = [(app.key, app.title) for app in sender.TYPES]
field.default = sender.TYPES[0].key
sender.TYPES_DICT = {app.key: app for app in sender.TYPES}
global _APPS_MODEL
_APPS_MODEL = sender
@classmethod
def check(cls, **kwargs):
errors = super().check(**kwargs)
errors.extend(cls._check_feincms3_appsmixin_templatemixin_clash(**kwargs))
return errors
@classmethod
def _check_feincms3_appsmixin_templatemixin_clash(cls, **kwargs):
from feincms3.mixins import TemplateMixin
if not cls._meta.abstract and issubclass(cls, TemplateMixin):
return [
Warning(
f"The model {cls._meta.label} extends both"
" PageTypeMixin and TemplateMixin. The new PageTypeMixin includes"
" the functionality of the TemplateMixin, please remove"
" the latter, fill in ``page_type`` fields either from"
" ``application`` (if non-empty) or from ``template_key``,"
" and rename ``app_instance_namespace`` to ``app_namespace``.",
obj=cls,
id="feincms3.W002",
)
]
return []
@property
def application(self):
warnings.warn(
"AppsMixin.application is PageTypeMixin.page_type now.",
DeprecationWarning,
stacklevel=2,
)
return self.page_type
@property
def app_instance_namespace(self):
warnings.warn(
"AppsMixin.app_instance_namespace is PageTypeMixin.app_namespace now.",
DeprecationWarning,
stacklevel=2,
)
return self.app_namespace
signals.class_prepared.connect(PageTypeMixin.fill_page_type_choices)
|
py
|
1a59e4ae3084bc7d83f4c06851ea35c4ae056136
|
# -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2017-03-23 17:14:51
# @Last modified by: LC
# @Last Modified time: 2017-03-23 17:14:55
# @Email: [email protected]
|
py
|
1a59e5033fc3d2bfccd073e4ba54d76555c30ea3
|
import ipaddress, subprocess
from flask import request, Response
from flask_restful import Resource
from wgpt.models import db, Client, Server, Cluster, ClientSchema, ServerSchema, ClusterSchema
from wgpt.wg_ssh_update import send_ssh_command
clients_schema = ClientSchema(many=True)
client_schema = ClientSchema()
def generate_client_keypair():
p_genkey = subprocess.Popen(["wg", "genkey"], stdout=subprocess.PIPE)
privkey = p_genkey.communicate()[0].decode().strip()
p_pubkey = subprocess.Popen(["wg", "pubkey"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
p_pubkey.stdin.write(privkey.encode("ascii"))
pubkey = p_pubkey.communicate()[0].decode().strip()
return privkey, pubkey
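# The two subprocess calls above mirror the shell pipeline ``wg genkey | wg pubkey``:
# the generated private key is piped into ``wg pubkey`` to derive the matching public key.
# Minimal usage sketch (assumes the ``wg`` binary is available on PATH):
#
#   privkey, pubkey = generate_client_keypair()
#   # pubkey is safe to register on the server; privkey belongs in the client config only.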
def get_next_availabe_ip(network, client_ips):
hosts_iterator = (host for host in network if host not in client_ips)
try:
client_ip = str(next(hosts_iterator))
return client_ip
except StopIteration:
return False
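# Behaviour sketch with hypothetical addresses: given the hosts of 10.0.0.0/29 and the
# already-assigned [10.0.0.1, 10.0.0.2], the generator skips the used addresses and
# next() yields "10.0.0.3"; when the iterator is exhausted the StopIteration is caught
# and False is returned so callers can report a full network.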
class AddClientToServer(Resource):
def post(self, server_id):
json_data = request.get_json(force=True)
if not json_data:
return {'message':'No input data provided'}, 400
server = Server.query.filter_by(id=server_id).first()
if not server:
return {'status':'failure', 'message':'Server not found'}
# Fetch client information for server to obtain free IP addresses
clients = Client.query.filter_by(server_id=json_data['server_id'])
client_ipv4 = None
client_ipv6 = None
if server.server_networkv4:
server_networkv4 = ipaddress.ip_network(server.server_networkv4).hosts()
clients_ipv4 = []
for client in clients:
clients_ipv4.append(ipaddress.ip_address(client.client_ipv4))
client_ipv4 = get_next_availabe_ip(server_networkv4, clients_ipv4)
if server.server_networkv6:
server_networkv6 = ipaddress.ip_network(server.server_networkv6).hosts()
clients_ipv6 = []
for client in clients:
clients_ipv6.append(ipaddress.ip_address(client.client_ipv6))
client_ipv6 = get_next_availabe_ip(server_networkv6, clients_ipv6)
# Generate keys
if client_ipv4 or client_ipv6:
client_keypair = generate_client_keypair()
client_privkey = client_keypair[0]
client_pubkey = client_keypair[1]
else:
return {'status':'failure','message':'Could not assign IP to client. Network full?'}
# Create the client through SSH on server if configured
if server.server_ssh_key:
if client_ipv4:
ssh_success = send_ssh_command('add', server.server_ip + ' ' + server.server_ssh_key, client_pubkey, client_ipv4)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
if client_ipv6:
ssh_success = send_ssh_command('add', server.server_ip + ' ' + server.server_ssh_key, client_pubkey, client_ipv6)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
# Store the client information in the database
new_client = Client(
client_ipv4 = client_ipv4,
client_ipv6 = client_ipv6,
client_pubkey = client_pubkey,
server_id = json_data['server_id'],
cluster_id = None,
client_description = json_data['client_description']
)
db.session.add(new_client)
db.session.commit()
data= {}
data['client_id'] = new_client.id
data['client_ipv4'] = client_ipv4
data['client_ipv6'] = client_ipv6
data['client_pubkey'] = client_pubkey
data['client_privkey'] = client_privkey
data['server_ip'] = server.server_ip
data['server_port'] = server.server_port
data['server_pubkey'] = server.server_pubkey
if server.server_dns is not None:
data['server_dns'] = server.server_dns
data['server_id'] = json_data['server_id']
return {'status': 'success', 'data': data}
class AddClientToCluster(Resource):
def post(self, cluster_id):
json_data = request.get_json(force=True)
if not json_data:
return {'message':'No input data provided'}, 400
cluster = Cluster.query.filter_by(id=cluster_id).first()
if not cluster:
return {'status':'failure', 'message':'Cluster not found'}
# Get relevant server data
# Get occupied client ip addresses on server
clients = Client.query.filter_by(cluster_id=cluster_id)
client_ipv4 = None
client_ipv6 = None
if cluster.cluster_networkv4:
cluster_networkv4 = ipaddress.ip_network(cluster.cluster_networkv4).hosts()
clients_ipv4 = []
for client in clients:
clients_ipv4.append(ipaddress.ip_address(client.client_ipv4))
client_ipv4 = get_next_availabe_ip(cluster_networkv4, clients_ipv4)
if cluster.cluster_networkv6:
cluster_networkv6 = ipaddress.ip_network(cluster.cluster_networkv6).hosts()
clients_ipv6 = []
for client in clients:
clients_ipv6.append(ipaddress.ip_address(client.client_ipv6))
client_ipv6 = get_next_availabe_ip(cluster_networkv6, clients_ipv6)
# Generate keys
if client_ipv4 or client_ipv6:
client_keypair = generate_client_keypair()
client_privkey = client_keypair[0]
client_pubkey = client_keypair[1]
else:
return {'status':'failure','message':'Could not assign IP to client. Network full?'}
# Store the client information in the database
new_client = Client(
client_ipv4 = client_ipv4,
client_ipv6 = client_ipv6,
client_description = json_data['client_description'],
client_pubkey = client_pubkey,
cluster_id = json_data['cluster_id'],
server_id = None,
)
db.session.add(new_client)
db.session.commit()
return {'status': 'success', 'data': {
'client_id': new_client.id,
'client_ipv4': client_ipv4,
'client_ipv6': client_ipv6,
'client_pubkey': client_pubkey,
'client_privkey': client_privkey,
'cluster_pubkey': cluster.cluster_pubkey,
'cluster_dns': cluster.cluster_dns,
'cluster_id': cluster.id
}
}
class GetClient(Resource):
def get(self, client_id):
clients = Client.query.filter_by(id=client_id).first()
if not clients:
return {'status':'failure', 'message':'No client with that id found'}, 200
clients = client_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class GetClients(Resource):
def get(self):
clients = Client.query.all()
clients = clients_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class GetClientsByServerId(Resource):
def get(self, server_id):
clients = Client.query.filter_by(server_id=server_id).all()
if not clients:
return {'status':'failure', 'message':'no clients found for that server id'}
clients = clients_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class GetClientsByClusterId(Resource):
def get(self, cluster_id):
clients = Client.query.filter_by(cluster_id=cluster_id).all()
if not clients:
return {'status':'failure', 'message':'no clients found for that cluster id'}
clients = clients_schema.dump(clients)
return {'status':'success', 'data':clients}, 200
class DeleteClient(Resource):
def delete(self, client_id):
if client_id:
client = Client.query.filter_by(id=client_id).first()
if not client:
return {'status':'failure', 'message':'no clients found for that id'}
server = Server.query.filter_by(id=client.server_id).first()
# Create the client through SSH on server if configured
if server.server_ssh_key:
if client.client_ipv4:
ssh_success = send_ssh_command('remove', server.server_ip + ' ' + server.server_ssh_key, client.client_pubkey, client.client_ipv4)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
if client.client_ipv6:
ssh_success = send_ssh_command('remove', server.server_ip + ' ' + server.server_ssh_key, client.client_pubkey, client.client_ipv6)
if not ssh_success:
return {'status':'failure', 'message':'There was a problem with the ssh provisioning.'}
db.session.delete(client)
db.session.commit()
return { 'status' : 'success', 'data':{'client_id':client_id}}, 200
|
py
|
1a59e63e8ca95ebecb570d8e3db583d37665aa55
|
numberat = 4
divideby = 2
print('I will now print Mersenne prime numbers.')
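# Trial-divide the candidate numberat - 1 (always of the form 2**k - 1) by every
# divideby from 2 upwards; reaching divideby == numberat - 1 without hitting a proper
# divisor means the candidate is prime, so it is printed as a Mersenne prime before
# moving on to the next power of two.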
while True:
if (numberat - 1) % divideby == 0 or divideby == numberat - 1:
if divideby == numberat - 1:
print('Here is one:')
print(numberat - 1)
numberat *= 2
divideby = 2
else:
divideby += 1
|
py
|
1a59e6873274bbc27561fcf7b11a7816ff42a553
|
# -*- coding: utf-8 -*-
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, check_that_in, is_, is_str, is_list, is_integer, require_that, \
require_that_in, has_length
from common.base_test import BaseTest
SUITE = {
"description": "Method 'get_account_history'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.tags("api", "history_api", "get_account_history")
@lcc.suite("Check work of method 'get_account_history'", rank=1)
class GetAccountHistory(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.__history_api_identifier = None
self.echo_acc0 = None
def setup_suite(self):
super().setup_suite()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
self.__history_api_identifier = self.get_identifier("history")
lcc.log_info(
"API identifiers are: database='{}', registration='{}', "
"history='{}'".format(self.__database_api_identifier, self.__registration_api_identifier,
self.__history_api_identifier))
self.echo_acc0 = self.get_account_id(self.accounts[0], self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("Echo account is '{}'".format(self.echo_acc0))
@lcc.test("Simple work of method 'get_account_history'")
def method_main_check(self):
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
limit = 1
lcc.set_step("Get account history")
params = [self.echo_acc0, stop, limit, start]
response_id = self.send_request(self.get_request("get_account_history", params), self.__history_api_identifier)
response = self.get_response(response_id)
lcc.log_info(
"Call method 'get_account_history' with: account='{}', stop='{}', limit='{}', start='{}' parameters".format(
self.echo_acc0, stop, limit, start))
lcc.set_step("Check response from method 'get_account_history'")
results = response["result"]
check_that(
"'number of history results'",
results, has_length(limit)
)
for result in results:
if not self.validator.is_operation_history_id(result["id"]):
lcc.log_error("Wrong format of 'operation id', got: {}".format(result["id"]))
else:
lcc.log_info("'operation_id' has correct format: operation_history_id")
check_that_in(
result,
"op", is_list(),
"result", is_list(),
"block_num", is_integer(),
"trx_in_block", is_integer(),
"op_in_trx", is_integer(),
"virtual_op", is_integer(),
quiet=True
)
@lcc.prop("positive", "type")
@lcc.tags("api", "history_api", "get_account_history")
@lcc.suite("Positive testing of method 'get_account_history'", rank=2)
class PositiveTesting(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.__history_api_identifier = None
self.echo_acc0 = None
self.echo_acc1 = None
def get_account_history(self, account, stop, limit, start, negative=False):
lcc.log_info("Get '{}' account history".format(account))
params = [account, stop, limit, start]
response_id = self.send_request(self.get_request("get_account_history", params), self.__history_api_identifier)
return self.get_response(response_id, negative=negative)
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
self.__history_api_identifier = self.get_identifier("history")
lcc.log_info(
"API identifiers are: database='{}', registration='{}', "
"history='{}'".format(self.__database_api_identifier, self.__registration_api_identifier,
self.__history_api_identifier))
self.echo_acc0 = self.get_account_id(self.accounts[0], self.__database_api_identifier,
self.__registration_api_identifier)
self.echo_acc1 = self.get_account_id(self.accounts[1], self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("Echo accounts are: #1='{}', #2='{}'".format(self.echo_acc0, self.echo_acc1))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Check new account history")
@lcc.depends_on("HistoryApi.GetAccountHistory.GetAccountHistory.method_main_check")
def new_account_history(self, get_random_valid_account_name):
new_account = get_random_valid_account_name
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
limit = 100
lcc.set_step("Create and get new account")
new_account = self.get_account_id(new_account, self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("New Echo account created, account_id='{}'".format(new_account))
lcc.set_step("Get new account history")
response = self.get_account_history(new_account, stop, limit, start)
lcc.set_step("Check new account history")
expected_number_of_operations = 1
require_that(
"'new account history'",
response["result"], has_length(expected_number_of_operations)
)
check_that(
"'id single operation'",
response["result"][0]["op"][0],
is_(self.echo.config.operation_ids.ACCOUNT_CREATE)
)
@lcc.test("Check limit number of operations to retrieve")
@lcc.depends_on("HistoryApi.GetAccountHistory.GetAccountHistory.method_main_check")
def limit_operations_to_retrieve(self, get_random_valid_account_name, get_random_integer_up_to_hundred):
new_account = get_random_valid_account_name
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
min_limit = 1
max_limit = 100
default_account_create_operation, default_get_assets_operation = 1, 1
operation_count = get_random_integer_up_to_hundred
lcc.set_step("Create and get new account")
new_account = self.get_account_id(new_account, self.__database_api_identifier,
self.__registration_api_identifier)
lcc.log_info("New Echo account created, account_id='{}'".format(new_account))
lcc.set_step("Perform operations using a new account. Operation count equal to limit")
self.utils.perform_transfer_operations(self, new_account, self.echo_acc0, self.__database_api_identifier,
operation_count=operation_count, only_in_history=True)
lcc.log_info("Fill account history with '{}' number of transfer operations".format(operation_count))
lcc.set_step(
"Check that count of new account history with the maximum limit is equal to operation_count")
response = self.get_account_history(new_account, stop, max_limit, start)
if operation_count == 1:
operation_count = operation_count + default_get_assets_operation
check_that(
"'number of history results'",
response["result"], has_length(operation_count + default_account_create_operation)
)
lcc.set_step("Check minimum list length account history")
response = self.get_account_history(new_account, stop, min_limit, start)
check_that(
"'number of history results'",
response["result"], has_length(min_limit)
)
lcc.set_step("Perform operations using a new account to create max_limit operations")
operation_count = max_limit - operation_count - default_account_create_operation
self.utils.perform_transfer_operations(self, new_account, self.echo_acc0, self.__database_api_identifier,
operation_count=operation_count, only_in_history=True)
lcc.log_info(
"Fill account history with '{}' number of transfer operations".format(operation_count))
lcc.set_step(
"Check that count of new account history with the limit = max_limit is equal to max_limit")
response = self.get_account_history(new_account, stop, max_limit, start)
check_that(
"'number of history results'",
response["result"], has_length(max_limit)
)
@lcc.test("Check stop and start IDs of the operations in account history")
@lcc.depends_on("HistoryApi.GetAccountHistory.GetAccountHistory.method_main_check")
def stop_and_start_operations(self, get_random_integer, get_random_integer_up_to_hundred):
transfer_amount_1 = get_random_integer
transfer_amount_2 = get_random_integer_up_to_hundred
operation_history_obj = "{}0".format(self.get_object_type(self.echo.config.object_types.OPERATION_HISTORY))
stop, start = operation_history_obj, operation_history_obj
operations = []
operation_ids = []
lcc.set_step("Perform one operation")
operation_count = 1
broadcast_result = self.utils.perform_transfer_operations(self, self.echo_acc0, self.echo_acc1,
self.__database_api_identifier,
transfer_amount=transfer_amount_1,
operation_count=operation_count, only_in_history=True)
lcc.log_info("Fill account history with '{}' number of transfer operations".format(operation_count))
operations.append(broadcast_result["trx"]["operations"][0])
limit = operation_count
lcc.set_step("Get account history. Limit: '{}'".format(limit))
response = self.get_account_history(self.echo_acc0, stop, limit, start)
lcc.set_step("Check account history to see added operation and store operation id")
require_that(
"'account history'",
response["result"][0]["op"], is_list(operations[0])
)
operation_id = response["result"][0]["id"]
lcc.set_step("Perform another operations")
operation_count = 5
broadcast_result = self.utils.perform_transfer_operations(self, self.echo_acc0, self.echo_acc1,
self.__database_api_identifier,
transfer_amount=transfer_amount_2,
operation_count=operation_count, only_in_history=True)
lcc.log_info("Fill account history with '{}' number of transfer operations".format(operation_count))
for i in range(operation_count):
operations.append(broadcast_result["trx"]["operations"][i])
limit = operation_count
stop = operation_id
lcc.set_step("Get account history. Stop: '{}', limit: '{}'".format(stop, limit))
response = self.get_account_history(self.echo_acc0, stop, limit, start)
lcc.set_step("Check account history to see added operations and store operation ids")
operations.reverse()
for i in range(limit):
require_that(
"'account history'",
response["result"][i]["op"], is_list(operations[i])
)
operation_ids.append(response["result"][i]["id"])
limit = operation_count + 1
stop = operation_id
start = operation_ids[0]
lcc.set_step("Get account history. Stop: '{}', limit: '{}' and start: '{}'".format(stop, limit, start))
results = self.get_account_history(self.echo_acc0, stop, limit, start)["result"]
lcc.set_step("Check account history to see operations from the selected ids interval")
for i, result in enumerate(results):
lcc.log_info("Check operation #{}:".format(i))
require_that_in(
result,
["id"], is_str(operation_ids[i]),
["op"], is_list(operations[i])
)
|
py
|
1a59e77a376ab7537fa2366a870bcad552d7b7f9
|
from .alexnet import *
from .densenet import *
from .le_net import *
from .resnet import *
|
py
|
1a59e7b5b3260516cc006655fcddb625b9229cc5
|
# Sourced from here: https://python-forum.io/Thread-Learning-Python-with-a-Caesar-cipher?pid=131456#pid131456
# Modified by Drone4four
import string
def encrypt(message, shift=0, replace='', alphabet=string.ascii_letters):
reply = ''
for letter in message:
try:
position = alphabet.index(letter) # Get ord value of letter in alphabet
position = (position+shift) % len(alphabet) # Shift ord value
reply += alphabet[position] # Convert shifted ord value back to a letter
except ValueError:
reply += replace # Use replace for letters not found in alphabet
return reply
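# Decryption is simply encryption with the negated shift over the same alphabet, e.g.
# encrypt(encrypt("abc", shift=3), shift=-3) == "abc" (illustrative values).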
message = "Hello World"
encrypted = encrypt(message, shift=1, replace=' ')
decrypted = encrypt(encrypted, shift=-1, replace=' ')  # undo the +1 shift used above
print(encrypted)
print(decrypted)
|
py
|
1a59e84c2be26c3997327048f87fb54a39a4cc59
|
"""The tests for the State vacuum Mqtt platform."""
from copy import deepcopy
import json
import pytest
from homeassistant.components import vacuum
from homeassistant.components.mqtt import CONF_COMMAND_TOPIC, CONF_STATE_TOPIC
from homeassistant.components.mqtt.vacuum import CONF_SCHEMA, schema_state as mqttvacuum
from homeassistant.components.mqtt.vacuum.schema import services_to_strings
from homeassistant.components.mqtt.vacuum.schema_state import SERVICE_TO_STRING
from homeassistant.components.vacuum import (
ATTR_BATTERY_ICON,
ATTR_BATTERY_LEVEL,
ATTR_FAN_SPEED,
ATTR_FAN_SPEED_LIST,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_START,
SERVICE_STOP,
STATE_CLEANING,
STATE_DOCKED,
)
from homeassistant.const import (
CONF_NAME,
CONF_PLATFORM,
ENTITY_MATCH_ALL,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
from tests.components.vacuum import common
COMMAND_TOPIC = "vacuum/command"
SEND_COMMAND_TOPIC = "vacuum/send_command"
STATE_TOPIC = "vacuum/state"
DEFAULT_CONFIG = {
CONF_PLATFORM: "mqtt",
CONF_SCHEMA: "state",
CONF_NAME: "mqtttest",
CONF_COMMAND_TOPIC: COMMAND_TOPIC,
mqttvacuum.CONF_SEND_COMMAND_TOPIC: SEND_COMMAND_TOPIC,
CONF_STATE_TOPIC: STATE_TOPIC,
mqttvacuum.CONF_SET_FAN_SPEED_TOPIC: "vacuum/set_fan_speed",
mqttvacuum.CONF_FAN_SPEED_LIST: ["min", "medium", "high", "max"],
}
DEFAULT_CONFIG_2 = {
vacuum.DOMAIN: {"platform": "mqtt", "schema": "state", "name": "test"}
}
async def test_default_supported_features(hass, mqtt_mock):
"""Test that the correct supported features."""
assert await async_setup_component(
hass, vacuum.DOMAIN, {vacuum.DOMAIN: DEFAULT_CONFIG}
)
await hass.async_block_till_done()
entity = hass.states.get("vacuum.mqtttest")
entity_features = entity.attributes.get(mqttvacuum.CONF_SUPPORTED_FEATURES, 0)
assert sorted(services_to_strings(entity_features, SERVICE_TO_STRING)) == sorted(
["start", "stop", "return_home", "battery", "status", "clean_spot"]
)
async def test_all_commands(hass, mqtt_mock):
"""Test simple commands send to the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "start", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "stop", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "pause", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(COMMAND_TOPIC, "locate", 0, False)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "clean_spot", 0, False
)
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_called_once_with(
COMMAND_TOPIC, "return_to_base", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/set_fan_speed", "medium", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(hass, "44 FE 93", entity_id="vacuum.mqtttest")
mqtt_mock.async_publish.assert_called_once_with(
"vacuum/send_command", "44 FE 93", 0, False
)
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
assert json.loads(mqtt_mock.async_publish.mock_calls[-1][1][1]) == {
"command": "44 FE 93",
"key": "value",
}
async def test_commands_without_supported_features(hass, mqtt_mock):
"""Test commands which are not supported by the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
services = mqttvacuum.STRING_TO_SERVICE["status"]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
services, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
await hass.services.async_call(
DOMAIN, SERVICE_START, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_PAUSE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_STOP, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_RETURN_TO_BASE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_LOCATE, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await hass.services.async_call(
DOMAIN, SERVICE_CLEAN_SPOT, {"entity_id": ENTITY_MATCH_ALL}, blocking=True
)
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_set_fan_speed(hass, "medium", "vacuum.mqtttest")
mqtt_mock.async_publish.assert_not_called()
mqtt_mock.async_publish.reset_mock()
await common.async_send_command(
hass, "44 FE 93", {"key": "value"}, entity_id="vacuum.mqtttest"
)
mqtt_mock.async_publish.assert_not_called()
async def test_status(hass, mqtt_mock):
"""Test status updates from the vacuum."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
assert state.attributes.get(ATTR_FAN_SPEED) == "max"
message = """{
"battery_level": 61,
"state": "docked",
"fan_speed": "min"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
assert state.attributes.get(ATTR_FAN_SPEED) == "min"
assert state.attributes.get(ATTR_FAN_SPEED_LIST) == ["min", "medium", "high", "max"]
async def test_no_fan_vacuum(hass, mqtt_mock):
"""Test status updates from the vacuum when fan is not supported."""
config = deepcopy(DEFAULT_CONFIG)
del config[mqttvacuum.CONF_FAN_SPEED_LIST]
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.DEFAULT_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
message = """{
"battery_level": 54,
"state": "cleaning"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 54,
"state": "cleaning",
"fan_speed": "max"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_CLEANING
assert state.attributes.get(ATTR_FAN_SPEED) is None
assert state.attributes.get(ATTR_FAN_SPEED_LIST) is None
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 54
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-50"
message = """{
"battery_level": 61,
"state": "docked"
}"""
async_fire_mqtt_message(hass, "vacuum/state", message)
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_DOCKED
assert state.attributes.get(ATTR_BATTERY_ICON) == "mdi:battery-charging-60"
assert state.attributes.get(ATTR_BATTERY_LEVEL) == 61
@pytest.mark.no_fail_on_log_exception
async def test_status_invalid_json(hass, mqtt_mock):
"""Test to make sure nothing breaks if the vacuum sends bad JSON."""
config = deepcopy(DEFAULT_CONFIG)
config[mqttvacuum.CONF_SUPPORTED_FEATURES] = services_to_strings(
mqttvacuum.ALL_SERVICES, SERVICE_TO_STRING
)
assert await async_setup_component(hass, vacuum.DOMAIN, {vacuum.DOMAIN: config})
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "vacuum/state", '{"asdfasas false}')
state = hass.states.get("vacuum.mqtttest")
assert state.state == STATE_UNKNOWN
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_update_with_json_attrs_bad_json(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one vacuum per unique_id."""
config = {
vacuum.DOMAIN: [
{
"platform": "mqtt",
"schema": "state",
"name": "Test 1",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"schema": "state",
"name": "Test 2",
"command_topic": "command-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, vacuum.DOMAIN, config)
async def test_discovery_removal_vacuum(hass, mqtt_mock, caplog):
"""Test removal of discovered vacuum."""
data = '{ "schema": "state", "name": "test",' ' "command_topic": "test_topic"}'
await help_test_discovery_removal(hass, mqtt_mock, caplog, vacuum.DOMAIN, data)
async def test_discovery_update_vacuum(hass, mqtt_mock, caplog):
"""Test update of discovered vacuum."""
data1 = '{ "schema": "state", "name": "Beer",' ' "command_topic": "test_topic"}'
data2 = '{ "schema": "state", "name": "Milk",' ' "command_topic": "test_topic"}'
await help_test_discovery_update(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "schema": "state", "name": "Beer",' ' "command_topic": "test_topic#"}'
data2 = '{ "schema": "state", "name": "Milk",' ' "command_topic": "test_topic"}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, vacuum.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT vacuum device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, vacuum.DOMAIN, DEFAULT_CONFIG_2, payload="{}"
)
|
py
|
1a59eb265e052c573b15b6220dcda01a3a5097d5
|
"""Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.group import ATTR_VISIBLE, DOMAIN, \
SERVICE_SET_VISIBILITY
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.loader import bind_hass
@bind_hass
def set_visibility(hass, entity_id=None, visible=True):
"""Hide or shows a group."""
data = {ATTR_ENTITY_ID: entity_id, ATTR_VISIBLE: visible}
hass.services.call(DOMAIN, SERVICE_SET_VISIBILITY, data)
|
py
|
1a59eb6c283d783eecc389a93fdf28567702f108
|
import streamlit as st # streamlit run Location100_RF_streamlit.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import warnings
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn import tree
from sklearn import preprocessing
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from scipy import stats
from sklearn.neighbors import KNeighborsRegressor
# import config
st.title('Pepper ML for Chicago area (location 100) using random forest')
# df = pd.read_csv("C:\PepperPepper\pepperProject.csv", encoding = 'unicode_escape', engine ='python')
url = f'https://raw.githubusercontent.com/LeonZly90/myData/main/pepperProject.csv?token=AG6BQ7M2G3HRK4IT4IU5ZALBD7M3S'
df = pd.read_csv(url, encoding='unicode_escape', engine='python')
df_data = df.copy()
new_sheet = pd.DataFrame(df_data,
columns=['OMOP_COMP_CODE', 'CTRL_JOB', 'STAGE_CODE', 'MARKET_TYPE', 'POTENTIAL_REV_AMT',
'TOTAL_HOURS'])
new_sheet = new_sheet[~new_sheet['MARKET_TYPE'].isin(['Select Market', 'Self Performed Work', 'Self Performed Direct'])]
new_sheet = new_sheet[new_sheet['POTENTIAL_REV_AMT'] > 0]
location_100 = new_sheet[new_sheet.OMOP_COMP_CODE == 100]
location_100 = location_100.drop('OMOP_COMP_CODE', axis=1)
# st.write('location_100:\n', location_100)
JobHour_by_StageMarket = location_100.groupby(['CTRL_JOB', 'STAGE_CODE', 'MARKET_TYPE'])[
    ['POTENTIAL_REV_AMT', 'TOTAL_HOURS']].sum().reset_index()
# st.write('JobHour_by_StageMarket:\n', JobHour_by_StageMarket) # [474 rows x 5 columns]
revAmt_Hour0 = JobHour_by_StageMarket.iloc[:, -2:].abs()
# st.write(revAmt_Hour0)
# with st.echo(code_location='below'):
# fig1 = plt.figure(1)
# plt.scatter(revAmt_Hour0['POTENTIAL_REV_AMT'], revAmt_Hour0['TOTAL_HOURS'])
# plt.xlabel('POTENTIAL_REV_AMT')
# plt.ylabel('TOTAL_HOURS')
# plt.show()
# st.write(fig1)
# clean outlier [469 rows x 5 columns]
z_scores = stats.zscore(revAmt_Hour0)
abs_z_scores = np.abs(z_scores)
revAmt_Hour1 = revAmt_Hour0[(abs_z_scores < 3).all(axis=1)]
# st.write(revAmt_Hour1)
# with st.echo(code_location='below'):
# fig2=plt.figure(2)
# plt.scatter(revAmt_Hour1['POTENTIAL_REV_AMT'], revAmt_Hour1['TOTAL_HOURS'])
# plt.xlabel('POTENTIAL_REV_AMT1')
# plt.ylabel('TOTAL_HOURS1')
# plt.show()
# st.write(fig2)
rest = JobHour_by_StageMarket.iloc[:, :-2]
JobHour_by_StageMarket = rest.join(revAmt_Hour1, how='outer')
# @st.cache # 👈 This function will be cached
JobHour_by_StageMarket = JobHour_by_StageMarket.dropna()
# st.write('Now JobHour_by_StageMarket:\n', JobHour_by_StageMarket) # [469 rows x 5 columns]
# @st.cache # 👈 This function will be cached
standardscaler = preprocessing.StandardScaler()
numer_feature = standardscaler.fit_transform(JobHour_by_StageMarket["POTENTIAL_REV_AMT"].values.reshape(-1, 1))
numer_feature = pd.DataFrame(numer_feature, columns=["POTENTIAL_REV_AMT"])
# st.write('numer_feature\n', numer_feature)
# @st.cache # 👈 This function will be cached
ohe = preprocessing.OneHotEncoder(categories='auto')
feature_arr = ohe.fit_transform(JobHour_by_StageMarket[['STAGE_CODE', 'MARKET_TYPE']]).toarray()
feature_labels = ohe.get_feature_names()
# st.write(feature_labels)
feature_labels = np.array(feature_labels, dtype=object).ravel()
# st.write('feature_labels\n', feature_labels)
features = pd.DataFrame(feature_arr, columns=feature_labels)
# st.write('features\n', features)
predictors = np.concatenate([features, numer_feature], axis=1)
# st.write('predictors:\n', predictors)
target = JobHour_by_StageMarket['TOTAL_HOURS']
# st.write('target:\n', target)
X_train, X_test, y_train, y_test = train_test_split(predictors, target, test_size=0.20, random_state=37)
# st.write(X_train.shape)
# st.write(X_test.shape)
# st.write(y_train.shape)
# st.write(y_test.shape)
# (328, 14)
# (141, 14)
# (328,)
# (141,)
# Random Forest # 0.7806525157351498 initial
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV
import time
start_time = time.time()
# reg = RandomForestRegressor(n_estimators=1000, criterion="mse")
# reg.fit(X_train, y_train)
# y_pred = reg.predict(X_test)
# r2_scoreE = r2_score(y_test, y_pred)
# st.write('\nRandom Forest\n')
# st.write("r2_score: {0}".format(r2_scoreE))
# rmse = mean_squared_error(y_test, y_pred, squared=False)
# st.write("RMSE: {0}".format(rmse))
####################################################################################
# # Number of trees in random forest
# n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# # Number of features to consider at every split
# max_features = ['auto', 'sqrt']
# # Maximum number of levels in tree
# max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
# max_depth.append(None)
# # Minimum number of samples required to split a node
# min_samples_split = [2, 5, 10]
# # Minimum number of samples required at each leaf node
# min_samples_leaf = [1, 2, 4]
# # Method of selecting samples for training each tree
# bootstrap = [True, False]
# # Create the random grid
#
# random_grid = {'n_estimators': n_estimators,
# 'max_features': max_features,
# 'max_depth': max_depth,
# 'min_samples_split': min_samples_split,
# 'min_samples_leaf': min_samples_leaf,
# 'bootstrap': bootstrap}
#
# rf = RandomForestRegressor()
# rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, n_iter=100, cv=3, verbose=2,
# random_state=42, n_jobs=-1)
# # Fit the random search model
# rf_random.fit(X_train, y_train)
#
# # search.best_params_ {'n_estimators': 400, 'min_samples_split': 5, 'min_samples_leaf': 1, 'max_features': 'sqrt', 'max_depth': 100, 'bootstrap': True}
# # search.fit(X_train, y_train)
# st.write('\nsearch.best_params_', rf_random.best_params_)
# end_time = time.time() # time 304.75399446487427
# st.write('time', end_time - start_time)
#
#
# best_search = rf_random.best_estimator_
# st.write('best_search\n', best_search)
# reg = best_search
####################################################################################
# search.best_params_ {'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': 80, 'bootstrap': True}
reg = RandomForestRegressor(n_estimators=200, min_samples_split=5, min_samples_leaf=4, max_features='auto',
max_depth=80, bootstrap=True)
# reg = RandomForestRegressor()
# r2_score: 0.7872974759353466
# MSE: 1107.7595622634976
# @st.cache # 👈 This function will be cached
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
r2_scoreF = r2_score(y_test, y_pred)
# st.write('\nRF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF RF')
# st.write("accur2_score: {0}".format(r2_scoreF)) # r2_score:
rmse = mean_squared_error(y_test, y_pred, squared=False)  # squared=False returns the root mean squared error
# st.write("RMSE: {0}".format(rmse))
x_ax = range(len(y_test))
# with st.echo(code_location='below'):
fig3 = plt.figure(3)
plt.scatter(x_ax, y_test, s=5, color="blue", label="original")
plt.plot(x_ax, y_pred, lw=0.8, color="red", label="predicted")
plt.xlabel('Trained model')
plt.ylabel('HOURS')
plt.legend()
plt.show()
st.write(fig3)
# @st.cache # 👈 This function will be cached
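# Descriptive note: predict_new_data reuses the preprocessing objects fitted above
# (the one-hot encoder `ohe` for STAGE_CODE/MARKET_TYPE and `standardscaler` for
# POTENTIAL_REV_AMT), concatenates the transformed columns in the same order as the
# training matrix, and feeds the single row to the trained random forest.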
def predict_new_data(test_data):
test_dataframe = pd.DataFrame(columns=JobHour_by_StageMarket.columns[1:3])
# st.write('test_dataframe:\n', test_dataframe)
for index, column in enumerate(test_dataframe.columns):
test_dataframe[column] = [test_data[index]]
# st.write('test_dataframe:\n', test_dataframe)
cate_test_one_hot = ohe.transform(test_dataframe).toarray()
# st.write('cate_test_one_hot\n', cate_test_one_hot)
numer_feature = standardscaler.transform(np.array(test_data[-1]).reshape(-1, 1))
# st.write('numer_test_stand:\n', numer_feature)
test = np.concatenate([cate_test_one_hot, numer_feature], axis=1)
# st.write('test:\n', test)
return reg.predict(test)
# ['STAGE_CODE','MARKET_TYPE',"POTENTIAL_REV_AMT"]
test_data_1 = ["BO", "Higher Education", 30000000] # 355
test_data_2 = ["SALE", "Healthcare", 20236036] # 909
test_data_3 = ["SALE", "Healthcare", 65172520] # 1180
test_data_4 = ["BR", "Healthcare", 297000] # 52
# st.write("For new data forecast1:", str(round(predict_new_data(test_data_1)[0], 2))) # 355 127.86
# st.write("For new data forecast2:", str(round(predict_new_data(test_data_2)[0], 2))) # 909 1536.94
# st.write("For new data forecast3:", str(round(predict_new_data(test_data_3)[0], 2))) # 1180 1385.98
# st.write("For new data forecast4:", str(round(predict_new_data(test_data_4)[0], 2))) # 52 42.82
STAGE_CODE = np.unique(JobHour_by_StageMarket['STAGE_CODE'])
MARKET_TYPE = np.unique(JobHour_by_StageMarket['MARKET_TYPE'])
r2_scoreF = r2_scoreF*100
st.write("Accuracy rate(r2_score): {0}%".format(round(r2_scoreF, 2)))
option1 = st.sidebar.selectbox(
'Choose your STAGE_CODE:',
STAGE_CODE)
st.write('You selected: ', option1)
option2 = st.sidebar.selectbox(
'Choose your MARKET_TYPE:',
MARKET_TYPE)
st.write('You selected: ', option2)
option3 = st.sidebar.number_input(
'Put your POTENTIAL_REV_AMT:',
)
st.write('You selected: $', option3)
test_data = [option1, option2, option3]
if float(test_data[2]) <= 0.00:
res = 0
else:
# st.sidebar.write('You want to predict:', test_data)
res = round(predict_new_data(test_data)[0], 2)
st.sidebar.write("Estimate:", res, 'hours.')
st.write('Estimate:', res, 'project hours.')
|
py
|
1a59ebc4500e15c1b62765522913b84719229404
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PapersBot
#
# purpose: read journal RSS feeds and tweet selected entries
# license: MIT License
# author: Nina Miolane
# e-mail: [email protected]
# inspired by: https://github.com/fxcoudert/PapersBot
import imghdr
import json
import os
import random
import re
import sys
import tempfile
import time
import urllib
import yaml
import bs4
import feedparser
import tweepy
# This is the regular expression that selects the papers of interest
regex = re.compile(r"""( geometric.deep.learning |
geometric.machine.learning |
geometric.neural.net |
geometric.statistics |
geomstats |
geoopt |
hyperbolic.data |
non-euclidean.data |
(?=.*non-euclidean)(?=.*deep.learning) |
(?=.*non-euclidean)(?=.*machine.learning) |
(?=.*non-euclidean)(?=.*neural.net) |
(?=.*non-euclidean)(?=.*statistics) |
manopt |
mctorch |
(?=.*riemannian)(?=.*data) |
(?=.*riemannian)(?=.*deep.learning) |
(?=.*riemannian)(?=.*statistic) |
(?=.*riemannian)(?=.*machine.learning) |
(?=.*riemannian)(?=.*neural.net) |
theanogeometry
)
""", re.IGNORECASE | re.VERBOSE)
# We select entries based on title or summary (abstract, for some feeds)
def entryMatches(entry):
# Malformed entry
if "title" not in entry:
return False
if regex.search(entry.title):
return True
if "summary" in entry:
return regex.search(entry.summary)
else:
return False
# Find the URL for an image associated with the entry
def findImage(entry):
if "description" not in entry:
return
soup = bs4.BeautifulSoup(entry.description, "html.parser")
img = soup.find("img")
if img:
img = img["src"]
if len(img) == 0:
return
# If address is relative, append root URL
if img[0] == "/":
p = urllib.parse.urlparse(entry.id)
img = f"{p.scheme}://{p.netloc}" + img
return img
# Convert string from HTML to plain text
def htmlToText(s):
return bs4.BeautifulSoup(s, "html.parser").get_text()
def downloadImage(url):
if not url:
return None
try:
img, _ = urllib.request.urlretrieve(url)
except Exception:
return None
    ext = imghdr.what(img)
    if ext is None:  # imghdr returns None for unrecognised image formats
        os.remove(img)
        return None
    res = img + "." + ext
    os.rename(img, res)
# Images smaller than 4 KB have a problem, and Twitter will complain
if os.path.getsize(res) < 4096:
os.remove(res)
return None
return res
# Connect to Twitter and authenticate
# Credentials are passed in the environment,
# or stored in "credentials.yml" which contains four lines:
# CONSUMER_KEY: "x1F3s..."
# CONSUMER_SECRET: "3VNg..."
# ACCESS_KEY: "7109..."
# ACCESS_SECRET: "AdnA..."
#
def initTwitter():
if 'CONSUMER_KEY' in os.environ:
cred = {'CONSUMER_KEY': os.environ['CONSUMER_KEY'],
'CONSUMER_SECRET': os.environ['CONSUMER_SECRET'],
'ACCESS_KEY': os.environ['ACCESS_KEY'],
'ACCESS_SECRET': os.environ['ACCESS_SECRET']}
else:
with open("credentials.yml", "r") as f:
cred = yaml.safe_load(f)
auth = tweepy.OAuthHandler(cred["CONSUMER_KEY"], cred["CONSUMER_SECRET"])
auth.set_access_token(cred["ACCESS_KEY"], cred["ACCESS_SECRET"])
return tweepy.API(auth)
def getTwitterConfig(api):
# Check for cached configuration, no more than a day old
if os.path.isfile("twitter_config.dat"):
mtime = os.stat("twitter_config.dat").st_mtime
if time.time() - mtime < 24 * 60 * 60:
with open("twitter_config.dat", "r") as f:
return json.load(f)
# Otherwise, query the Twitter API and cache the result
config = api.configuration()
with open("twitter_config.dat", "w") as f:
json.dump(config, f)
return config
# Read our list of feeds from file
def readFeedsList():
with open("feeds.txt", "r") as f:
feeds = [s.partition("#")[0].strip() for s in f]
return [s for s in feeds if s]
# Remove unwanted text some journals insert into the feeds
def cleanText(s):
# Annoying ASAP tags
s = s.replace("[ASAP]", "")
    # Some feeds have LF characters
s = s.replace("\x0A", "")
# Remove (arXiv:1903.00279v1 [cond-mat.mtrl-sci])
s = re.sub(r"\(arXiv:.+\)", "", s)
# Remove multiple spaces, leading and trailing space
return re.sub("\\s\\s+", " ", s).strip()
# Read list of feed items already posted
def readPosted():
try:
with open("posted.dat", "r") as f:
return f.read().splitlines()
except Exception:
return []
class PapersBot:
posted = []
n_seen = 0
n_tweeted = 0
def __init__(self, doTweet=True):
self.feeds = readFeedsList()
self.posted = readPosted()
# Read parameters from configuration file
try:
with open("config.yml", "r") as f:
config = yaml.safe_load(f)
        except Exception:
config = {}
self.throttle = config.get("throttle", 0)
self.wait_time = config.get("wait_time", 5)
self.shuffle_feeds = config.get("shuffle_feeds", True)
self.blacklist = config.get("blacklist", [])
self.blacklist = [re.compile(s) for s in self.blacklist]
# Shuffle feeds list
if self.shuffle_feeds:
random.shuffle(self.feeds)
# Connect to Twitter, unless requested not to
if doTweet:
self.api = initTwitter()
else:
self.api = None
# Determine maximum tweet length
if doTweet:
twconfig = getTwitterConfig(self.api)
urllen = max(twconfig["short_url_length"], twconfig["short_url_length_https"])
imglen = twconfig["characters_reserved_per_media"]
else:
urllen = 23
imglen = 24
self.maxlength = 280 - (urllen + 1) - imglen
# Start-up banner
print(f"This is PapersBot running at {time.strftime('%Y-%m-%d %H:%M:%S %Z')}")
if self.api:
timeline = self.api.user_timeline(count=1)
if len(timeline) > 0:
print(f"Last tweet was posted at {timeline[0].created_at} (UTC)")
else:
print(f"No tweets posted yet? Welcome, new user!")
print(f"Feed list has {len(self.feeds)} feeds\n")
# Add to tweets posted
def addToPosted(self, url):
with open("posted.dat", "a+") as f:
print(url, file=f)
self.posted.append(url)
# Send a tweet for a given feed entry
def sendTweet(self, entry):
title = cleanText(htmlToText(entry.title))
length = self.maxlength
# Usually the ID is the canonical URL, but not always
if entry.id[:8] == "https://" or entry.id[:7] == "http://":
url = entry.id
else:
url = entry.link
# URL may be malformed
if not (url[:8] == "https://" or url[:7] == "http://"):
print(f"INVALID URL: {url}\n")
return
tweet_body = title[:length] + " " + url
# URL may match our blacklist
for regexp in self.blacklist:
if regexp.search(url):
print(f"BLACKLISTED: {tweet_body}\n")
self.addToPosted(entry.id)
return
media = None
image = findImage(entry)
image_file = downloadImage(image)
if image_file:
print(f"IMAGE: {image}")
if self.api:
media = [self.api.media_upload(image_file).media_id]
os.remove(image_file)
print(f"TWEET: {tweet_body}\n")
if self.api:
try:
self.api.update_status(tweet_body, media_ids=media)
except tweepy.error.TweepError as e:
if e.api_code == 187:
print("ERROR: Tweet refused as duplicate\n")
else:
print(f"ERROR: Tweet refused, {e.reason}\n")
sys.exit(1)
self.addToPosted(entry.id)
self.n_tweeted += 1
if self.api:
time.sleep(self.wait_time)
# Main function, iterating over feeds and posting new items
def run(self):
for feed in self.feeds:
parsed_feed = feedparser.parse(feed)
for entry in parsed_feed.entries:
if entryMatches(entry):
self.n_seen += 1
# If no ID provided, use the link as ID
if "id" not in entry:
entry.id = entry.link
if entry.id not in self.posted:
self.sendTweet(entry)
# Bail out if we have reached max number of tweets
if self.throttle > 0 and self.n_tweeted >= self.throttle:
print(f"Max number of papers met ({self.throttle}), stopping now")
return
# Print statistics of a given run
def printStats(self):
print(f"Number of relevant papers: {self.n_seen}")
print(f"Number of papers tweeted: {self.n_tweeted}")
# Print out the n top tweets (most liked and RT'ed)
def printTopTweets(self, count=20):
tweets = self.api.user_timeline(count=200)
oldest = tweets[-1].created_at
print(f"Top {count} recent tweets, by number of RT and likes, since {oldest}:\n")
tweets = [(t.retweet_count + t.favorite_count, t.id, t) for t in tweets]
tweets.sort(reverse=True)
for _, _, t in tweets[0:count]:
url = f"https://twitter.com/{t.user.screen_name}/status/{t.id}"
print(f"{t.retweet_count} RT {t.favorite_count} likes: {url}")
print(f" {t.created_at}")
print(f" {t.text}\n")
def main():
# Make sure all options are correctly typed
options_allowed = ["--do-not-tweet", "--top-tweets"]
for arg in sys.argv[1:]:
if arg not in options_allowed:
print(f"Unknown option: {arg}")
sys.exit(1)
# Initialize our bot
doTweet = "--do-not-tweet" not in sys.argv
bot = PapersBot(doTweet)
# We can print top tweets
if "--top-tweets" in sys.argv:
bot.printTopTweets()
sys.exit(0)
bot.run()
bot.printStats()
if __name__ == "__main__":
main()
|
py
|
1a59ef263667e2f80c98ba21e9b1b7c8bc628e4e
|
"""
Copyright 2017 Robin Verschueren, 2017 Akshay Agrawal
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cvxpy.settings as s
from cvxpy.constraints import SOC, ExpCone, PSD, Zero, NonNeg
from cvxpy.reductions.cvx_attr2constr import convex_attributes
from cvxpy.reductions.dcp2cone.cone_matrix_stuffing import ParamConeProg
from cvxpy.reductions.solution import Solution, failure_solution
from cvxpy.reductions.solvers.solver import Solver
from cvxpy.reductions.solvers import utilities
import numpy as np
import scipy.sparse as sp
# NOTE(akshayka): Small changes to this file can lead to drastic
# performance regressions. If you are making a change to this file,
# make sure to run cvxpy/tests/test_benchmarks.py to ensure that you have
# not introduced a regression.
class LinearOperator(object):
"""A wrapper for linear operators."""
def __init__(self, linear_op, shape):
if sp.issparse(linear_op):
self._matmul = lambda X: linear_op @ X
else:
self._matmul = linear_op
self.shape = shape
def __call__(self, X):
return self._matmul(X)
def as_linear_operator(linear_op):
if isinstance(linear_op, LinearOperator):
return linear_op
elif sp.issparse(linear_op):
return LinearOperator(linear_op, linear_op.shape)
def as_block_diag_linear_operator(matrices):
"""Block diag of SciPy sparse matrices or linear operators."""
linear_operators = [as_linear_operator(op) for op in matrices]
nrows = [op.shape[0] for op in linear_operators]
ncols = [op.shape[1] for op in linear_operators]
m, n = sum(nrows), sum(ncols)
col_indices = np.append(0, np.cumsum(ncols))
def matmul(X):
outputs = []
for i, op in enumerate(linear_operators):
Xi = X[col_indices[i]:col_indices[i + 1]]
outputs.append(op(Xi))
return sp.vstack(outputs)
return LinearOperator(matmul, (m, n))
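# Illustrative note (small made-up blocks): the operator returned above behaves like
# sp.block_diag(matrices) applied to a stacked input, without materialising the big
# matrix. For blocks of shapes (2, 3) and (4, 5) it reports shape (6, 8) and, when
# called on an 8-row sparse matrix X, applies the first block to X[0:3] and the
# second to X[3:8], vstacking the results.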
class ConicSolver(Solver):
"""Conic solver class with reduction semantics
"""
# The key that maps to ConeDims in the data returned by apply().
DIMS = "dims"
# Every conic solver must support Zero and NonNeg constraints.
SUPPORTED_CONSTRAINTS = [Zero, NonNeg]
# Some solvers cannot solve problems that do not have constraints.
# For such solvers, REQUIRES_CONSTR should be set to True.
REQUIRES_CONSTR = False
EXP_CONE_ORDER = None
def accepts(self, problem):
return (isinstance(problem, ParamConeProg)
and (self.MIP_CAPABLE or not problem.is_mixed_integer())
and not convex_attributes([problem.x])
and (len(problem.constraints) > 0 or not self.REQUIRES_CONSTR)
and all(type(c) in self.SUPPORTED_CONSTRAINTS for c in
problem.constraints))
@staticmethod
def get_spacing_matrix(shape, spacing, streak, num_blocks, offset):
"""Returns a sparse matrix that spaces out an expression.
Parameters
----------
shape : tuple
(rows in matrix, columns in matrix)
spacing : int
The number of rows between the start of each non-zero block.
streak: int
The number of elements in each block.
num_blocks : int
The number of non-zero blocks.
offset : int
The number of zero rows at the beginning of the matrix.
Returns
-------
SciPy CSC matrix
A sparse matrix
"""
num_values = num_blocks * streak
val_arr = np.ones(num_values, dtype=np.float64)
streak_plus_spacing = streak + spacing
row_arr = np.arange(0, num_blocks * streak_plus_spacing).reshape(
num_blocks, streak_plus_spacing)[:, :streak].flatten() + offset
col_arr = np.arange(num_values)
return sp.csc_matrix((val_arr, (row_arr, col_arr)), shape)
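    # Illustrative note (hypothetical arguments): get_spacing_matrix(shape=(6, 2),
    # spacing=2, streak=1, num_blocks=2, offset=0) puts a 1 at (row 0, col 0) and at
    # (row 3, col 1): each block contributes `streak` rows, separated by `spacing`
    # empty rows, with `offset` blank rows prepended.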
def psd_format_mat(self, constr):
"""Return a matrix to multiply by PSD constraint coefficients.
"""
# Default is identity.
return sp.eye(constr.size, format='csc')
def format_constraints(self, problem, exp_cone_order):
"""
Returns a ParamConeProg whose problem data tensors will yield the
coefficient "A" and offset "b" for the constraint in the following
formats:
Linear equations: (A, b) such that A * x + b == 0,
Linear inequalities: (A, b) such that A * x + b >= 0,
Second order cone: (A, b) such that A * x + b in SOC,
Exponential cone: (A, b) such that A * x + b in EXP,
Semidefinite cone: (A, b) such that A * x + b in PSD,
The CVXPY standard for the exponential cone is:
K_e = closure{(x,y,z) | z >= y * exp(x/y), y>0}.
Whenever a solver uses this convention, EXP_CONE_ORDER should be
[0, 1, 2].
The CVXPY standard for the second order cone is:
SOC(n) = { x : x[0] >= norm(x[1:n], 2) }.
All currently supported solvers use this convention.
Args:
problem : ParamConeProg
The problem that is the provenance of the constraint.
exp_cone_order: list
A list indicating how the exponential cone arguments are ordered.
Returns:
ParamConeProg with structured A.
"""
# Create a matrix to reshape constraints, then replicate for each
# variable entry.
restruct_mat = [] # Form a block diagonal matrix.
for constr in problem.constraints:
total_height = sum([arg.size for arg in constr.args])
if type(constr) == Zero:
restruct_mat.append(-sp.eye(constr.size, format='csr'))
elif type(constr) == NonNeg:
restruct_mat.append(sp.eye(constr.size, format='csr'))
elif type(constr) == SOC:
# Group each t row with appropriate X rows.
assert constr.axis == 0, 'SOC must be lowered to axis == 0'
# Interleave the rows of coeffs[0] and coeffs[1]:
# coeffs[0][0, :]
# coeffs[1][0:gap-1, :]
# coeffs[0][1, :]
# coeffs[1][gap-1:2*(gap-1), :]
t_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[0].size),
spacing=constr.args[1].shape[0],
streak=1,
num_blocks=constr.args[0].size,
offset=0,
)
X_spacer = ConicSolver.get_spacing_matrix(
shape=(total_height, constr.args[1].size),
spacing=1,
streak=constr.args[1].shape[0],
num_blocks=constr.args[0].size,
offset=1,
)
restruct_mat.append(sp.hstack([t_spacer, X_spacer]))
elif type(constr) == ExpCone:
arg_mats = []
for i, arg in enumerate(constr.args):
space_mat = ConicSolver.get_spacing_matrix(
shape=(total_height, arg.size),
spacing=len(exp_cone_order) - 1,
streak=1,
num_blocks=arg.size,
offset=exp_cone_order[i],
)
arg_mats.append(space_mat)
restruct_mat.append(sp.hstack(arg_mats))
elif type(constr) == PSD:
restruct_mat.append(self.psd_format_mat(constr))
else:
raise ValueError("Unsupported constraint type.")
# Form new ParamConeProg
if restruct_mat:
# TODO(akshayka): profile to see whether using linear operators
# or bmat is faster
restruct_mat = as_block_diag_linear_operator(restruct_mat)
# this is equivalent to but _much_ faster than:
# restruct_mat_rep = sp.block_diag([restruct_mat]*(problem.x.size + 1))
# restruct_A = restruct_mat_rep * problem.A
reshaped_A = problem.A.reshape(restruct_mat.shape[1], -1, order='F').tocsr()
restructured_A = restruct_mat(reshaped_A).tocoo()
# Because of a bug in scipy versions < 1.20, `reshape`
# can overflow if indices are int32s.
restructured_A.row = restructured_A.row.astype(np.int64)
restructured_A.col = restructured_A.col.astype(np.int64)
restructured_A = restructured_A.reshape(
restruct_mat.shape[0] * (problem.x.size + 1),
problem.A.shape[1], order='F')
else:
restructured_A = problem.A
new_param_cone_prog = ParamConeProg(problem.c,
problem.x,
restructured_A,
problem.variables,
problem.var_id_to_col,
problem.constraints,
problem.parameters,
problem.param_id_to_col,
formatted=True)
return new_param_cone_prog
def invert(self, solution, inverse_data):
"""Returns the solution to the original problem given the inverse_data.
"""
status = solution['status']
if status in s.SOLUTION_PRESENT:
opt_val = solution['value']
primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}
eq_dual = utilities.get_dual_values(
solution['eq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.EQ_CONSTR])
leq_dual = utilities.get_dual_values(
solution['ineq_dual'],
utilities.extract_dual_value,
inverse_data[Solver.NEQ_CONSTR])
eq_dual.update(leq_dual)
dual_vars = eq_dual
return Solution(status, opt_val, primal_vars, dual_vars, {})
else:
return failure_solution(status)
|
py
|
1a59ef63cdd099f71b9d8c23e8004b294a129723
|
import stackless
#
# 'Sleep helper functions
#
sleepingTasklets = []
sleepingTicks = 0
def Sleep(secondsToWait):
channel = stackless.channel()
endTime = sleepingTicks + secondsToWait
sleepingTasklets.append((endTime, channel))
sleepingTasklets.sort()
# Block until we get sent an awakening notification.
channel.receive()
def ManageSleepingTasklets():
global sleepingTicks
while 1:
if len(sleepingTasklets):
endTime = sleepingTasklets[0][0]
while endTime <= sleepingTicks:
channel = sleepingTasklets[0][1]
del sleepingTasklets[0]
# We have to send something, but it doesn't matter
# what as it is not used.
                channel.send(None)
                if not sleepingTasklets:
                    break
                endTime = sleepingTasklets[0][0] # check next
sleepingTicks += 1
print "1 second passed"
stackless.schedule()
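# Descriptive note: ManageSleepingTasklets runs as its own tasklet; on every pass of
# the scheduler it advances the tick counter by one "second" and sends a wake-up on
# the channel of every sleeper whose end time has been reached, unblocking Sleep().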
stackless.tasklet(ManageSleepingTasklets)()
#
# Factory implementation
#
class storeroom:
def __init__(self,name,product,unit,count):
self.product = product
self.unit = unit
self.count = count
self.name = name
def get(self,count):
while count > self.count: #reschedule until we have enough
print "%s doesn't have enough %s to deliver yet" % (self.name,
self.product)
stackless.schedule()
self.count -= count
return count
def put(self,count):
self.count += count
def run(self):
pass
rivetStoreroom = storeroom("rivetStoreroom","rivets","#",1000)
plasticStoreroom = storeroom("plastic Storeroom","plastic pellets","lb",100)
class injectionMolder:
def __init__(self,name,partName,plasticSource,plasticPerPart,timeToMold):
self.partName = partName
self.plasticSource = plasticSource
self.plasticPerPart = plasticPerPart
self.timeToMold = timeToMold
self.plastic = 0
self.items = 0
self.name = name
stackless.tasklet(self.run)()
def get(self,items):
while items > self.items: #reschedule until we have enough
print "%s doesn't have enough %s to deliver yet" % (self.name,
self.partName)
stackless.schedule()
self.items -= items
return items
def run(self):
while 1:
print "%s starts making new part %s" % (self.name,self.partName)
if self.plastic < self.plasticPerPart:
print "%s getting more plastic"
self.plastic += self.plasticSource.get(self.plasticPerPart * 10)
self.plastic -= self.plasticPerPart
Sleep(self.timeToMold)
print "%s done molding after %s seconds" % (self.partName,
self.timeToMold)
self.items += 1
print "%s finished making part" % self.name
stackless.schedule()
armMolder = injectionMolder("arm Molder", "arms",plasticStoreroom,0.2,5)
legMolder = injectionMolder("leg Molder", "leg",plasticStoreroom,0.2,5)
headMolder = injectionMolder("head Molder","head",plasticStoreroom,0.1,5)
torsoMolder = injectionMolder("torso Molder","torso",plasticStoreroom,0.5,10)
class assembler:
def __init__(self,name,partAsource,partBsource,rivetSource,timeToAssemble):
self.partAsource = partAsource
self.partBsource = partBsource
self.rivetSource = rivetSource
self.timeToAssemble = timeToAssemble
self.itemA = 0
self.itemB = 0
self.items = 0
self.rivets = 0
self.name = name
stackless.tasklet(self.run)()
def get(self,items):
while items > self.items: #reschedule until we have enough
print "Don't have a %s to deliver yet" % (self.name)
stackless.schedule()
self.items -= items
return items
def run(self):
while 1:
print "%s starts assembling new part" % self.name
self.itemA += self.partAsource.get(1)
self.itemB += self.partBsource.get(1)
print "%s starting to assemble" % self.name
Sleep(self.timeToAssemble)
print "%s done assembling after %s" % (self.name, self.timeToAssemble)
self.items += 1
print "%s finished assembling part" % self.name
stackless.schedule()
legAssembler = assembler("leg Assembler",torsoMolder,legMolder,rivetStoreroom,2)
armAssembler = assembler("arm Assembler", armMolder,legAssembler,rivetStoreroom,2)
torsoAssembler = assembler("torso Assembler", headMolder,armAssembler,
rivetStoreroom,3)
def pause():
while 1:
raw_input("Press <ENTER> to continue...")
print "\n\n\n"
stackless.schedule()
stackless.tasklet(pause)()
def run():
stackless.run()
if __name__ == "__main__":
run()
|
py
|
1a59f17d6f3461df55cf3dc93dabca20ed5c01a3
|
#!/usr/bin/env python
from plasTeX import Command, Environment
from plasTeX.Base.LaTeX.Arrays import Array
from plasTeX.Base.LaTeX.Math import EqnarrayStar, equation, eqnarray
#### Imports Added by Tim ####
from plasTeX.Base.LaTeX.Math import math
from plasTeX.Base.LaTeX.Crossref import ref
def ProcessOptions(options, document):
import amstext
document.context.importMacros(vars(amstext))
class pmatrix(Array):
pass
class _AMSEquation(eqnarray):
pass
class _AMSEquationStar(EqnarrayStar):
macroName = None
class align(_AMSEquation):
pass
class AlignStar(_AMSEquationStar):
macroName = 'align*'
class gather(_AMSEquation):
pass
class GatherStar(_AMSEquationStar):
macroName = 'gather*'
class falign(_AMSEquation):
pass
class FAlignStar(_AMSEquationStar):
macroName = 'falign*'
class multiline(_AMSEquation):
pass
class MultilineStar(_AMSEquationStar):
macroName = 'multiline*'
class alignat(_AMSEquation):
pass
class AlignatStar(_AMSEquationStar):
macroName = 'alignat*'
class split(_AMSEquation):
pass
#### Added by Tim ####
class EquationStar(_AMSEquationStar):
macroName = 'equation*'
class aligned(_AMSEquation):
pass
class cases(_AMSEquation):
pass
class alignat(_AMSEquation):
args = 'column:int'
class AlignatStar(_AMSEquationStar):
args = 'column:int'
macroName = 'alignat*'
class flalign(_AMSEquation):
pass
class FlalignStar(_AMSEquationStar):
macroName = 'flalign*'
class subequations(_AMSEquation):
pass
class xalignat(alignat):
pass
class multline(multiline):
pass
class MultlineStar(MultilineStar):
macroName = 'multline*'
class matrix(Array):
pass
class vmatrix(Array):
pass
class Vmatrix(Array):
pass
class bmatrix(Array):
pass
class Bmatrix(Array):
pass
#### Inline Math
class smallmatrix(math):
pass
class dddot(math):
pass
class ddddot(math):
pass
class eqref(ref):
pass
|
py
|
1a59f292497b2d5ac3f0b7de4ea82489d3f63434
|
import torch.nn as nn
import torch
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 3, 3)
self.relu1 = nn.ReLU()
self.mp1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(3, 3, 3)
self.relu2 = nn.ReLU()
self.mp2 = nn.MaxPool2d(2)
self.flatten = nn.Flatten()
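        # 108 = 3 channels * 6 * 6: a 32x32 input becomes 30x30 after conv1 (3x3, no
        # padding), 15x15 after the first 2x2 max-pool, 13x13 after conv2, and 6x6
        # after the second max-pool.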
self.fc1 = nn.Linear(108, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 10)
def forward(self, x):
out = self.conv1(x)
out = self.relu1(out)
out = self.mp1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.mp2(out)
out = self.flatten(out)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
return out
if __name__ == "__main__":
model = Model()
x = torch.ones((64, 3, 32, 32))
    print(model(x))
|
py
|
1a59f3de51f5fc969a5bececa4b0720af491b1c6
|
'''
Helpers for hpi doctor/stats functionality.
'''
import collections
import importlib
import inspect
import sys
import typing
from typing import Optional
from .common import StatsFun, Stats, stat
# TODO maybe could be enough to annotate OUTPUTS or something like that?
# then stats could just use them as hints?
def guess_stats(module_name: str) -> Optional[StatsFun]:
module = importlib.import_module(module_name)
mfunctions = inspect.getmembers(module, inspect.isfunction)
functions = {k: v for k, v in mfunctions if is_data_provider(v)}
if len(functions) == 0:
return None
def auto_stats() -> Stats:
return {k: stat(v) for k, v in functions.items()}
return auto_stats
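# Illustrative sketch (module name is made up): guess_stats only picks up zero-argument
# functions whose return annotation is an iterable, so
#   stats_fn = guess_stats('my.hypothetical_module')
#   stats_fn() if stats_fn is not None else {}   # e.g. {'events': stat(events), ...}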
def is_data_provider(fun) -> bool:
"""
1. returns iterable or something like that
2. takes no arguments? (otherwise not callable by stats anyway?)
"""
# todo maybe for 2 allow default arguments? not sure
# one example which could benefit is my.pdfs
if fun is None:
return False
# todo. uh.. very similar to what cachew is trying to do?
try:
sig = inspect.signature(fun)
except ValueError: # not a function?
return False
if len(sig.parameters) > 0:
return False
return_type = sig.return_annotation
return type_is_iterable(return_type)
def test_is_data_provider() -> None:
idp = is_data_provider
assert not idp(None)
assert not idp(int)
def no_return_type():
        return [1, 2, 3]
assert not idp(no_return_type)
lam = lambda: [1, 2]
assert not idp(lam)
def has_extra_args(count) -> typing.List[int]:
return list(range(count))
assert not idp(has_extra_args)
def has_return_type() -> typing.Sequence[str]:
return ['a', 'b', 'c']
assert idp(has_return_type)
def type_is_iterable(type_spec) -> bool:
if sys.version_info[1] < 8:
# there is no get_origin before 3.8, and retrofitting gonna be a lot of pain
return any(x in str(type_spec) for x in ['List', 'Sequence', 'Iterable', 'Iterator'])
origin = typing.get_origin(type_spec)
if origin is None:
return False
# explicitly exclude dicts... not sure?
if issubclass(origin, collections.abc.Mapping):
return False
if issubclass(origin, collections.abc.Iterable):
return True
return False
# todo docstring test?
def test_type_is_iterable() -> None:
from typing import List, Sequence, Iterable, Dict, Any
fun = type_is_iterable
assert not fun(None)
assert not fun(int)
assert not fun(Any)
assert not fun(Dict[int, int])
assert fun(List[int])
assert fun(Sequence[Dict[str, str]])
assert fun(Iterable[Any])
|
py
|
1a59f408df264b6932509b5e82241ec973d900fb
|
"""access_control URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django_app_permissions_access_request import views
urlpatterns = [
path('approvals/', views.ApprovalsView.as_view(), name='django_app_permissions_access_request.approvals'),
path('approval/', views.ApprovalView.as_view(), name='django_app_permissions_access_request.approval'),
path('deny/', views.DenyView.as_view(), name='django_app_permissions_access_request.deny'),
path('request/', views.RequestView.as_view(), name='django_app_permissions_access_request.request_access'),
path('', views.RequestListView.as_view(), name='django_app_permissions_access_request.request_list'),
]
|
py
|
1a59f41d97274bfff12d4dcd5978d7c6420c220f
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Type
from typing import Union
from types import ModuleType
from typing import TYPE_CHECKING
from libcloud.container.types import Provider
from libcloud.common.providers import get_driver as _get_provider_driver
from libcloud.common.providers import set_driver as _set_provider_driver
if TYPE_CHECKING:
# NOTE: This is needed to avoid having setup.py depend on requests
from libcloud.container.base import ContainerDriver
DRIVERS = {
Provider.DUMMY:
('libcloud.container.drivers.dummy', 'DummyContainerDriver'),
Provider.DOCKER:
('libcloud.container.drivers.docker', 'DockerContainerDriver'),
Provider.JOYENT:
('libcloud.container.drivers.joyent', 'JoyentContainerDriver'),
Provider.ECS:
('libcloud.container.drivers.ecs', 'ElasticContainerDriver'),
Provider.KUBERNETES:
('libcloud.container.drivers.kubernetes', 'KubernetesContainerDriver'),
Provider.LXD:
('libcloud.container.drivers.lxd', 'LXDContainerDriver'),
Provider.RANCHER:
('libcloud.container.drivers.rancher', 'RancherContainerDriver'),
Provider.GKE:
('libcloud.container.drivers.gke', 'GKEContainerDriver')
}
def get_driver(provider):
# type: (Union[Provider, str]) -> Type[ContainerDriver]
return _get_provider_driver(drivers=DRIVERS, provider=provider)
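# Illustrative sketch: resolving a driver class by provider constant, e.g.
#   cls = get_driver(Provider.DOCKER)   # -> DockerContainerDriver
# Constructor arguments differ per driver, so instantiation is omitted here.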
def set_driver(provider, module, klass):
# type: (Union[Provider, str], ModuleType, type) -> Type[ContainerDriver]
return _set_provider_driver(drivers=DRIVERS, provider=provider,
module=module, klass=klass)
|
py
|
1a59f421952c6c930f8a465e41bfb4564635e91a
|
"""Mock Server for simple calls the cli and public api make"""
from flask import Flask, request, g, jsonify
import os
import sys
from datetime import datetime, timedelta
import json
import yaml
import six
# HACK: restore first two entries of sys path after wandb load
save_path = sys.path[:2]
import wandb
sys.path[0:0] = save_path
import logging
from six.moves import urllib
import threading
from tests.utils.mock_requests import RequestsMock, InjectRequestsParse
def default_ctx():
return {
"fail_graphql_count": 0, # used via "fail_graphql_times"
"fail_storage_count": 0, # used via "fail_storage_times"
"rate_limited_count": 0, # used via "rate_limited_times"
"page_count": 0,
"page_times": 2,
"requested_file": "weights.h5",
"current_run": None,
"files": {},
"k8s": False,
"resume": False,
"file_bytes": {},
"manifests_created": [],
"artifacts_by_id": {},
"upsert_bucket_count": 0,
}
def mock_server(mocker):
ctx = default_ctx()
app = create_app(ctx)
mock = RequestsMock(app, ctx)
# We mock out all requests libraries, couldn't find a way to mock the core lib
sdk_path = "wandb.sdk"
mocker.patch("gql.transport.requests.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.file_stream.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.internal_api.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.update.requests", mock)
mocker.patch("wandb.wandb_sdk.internal.sender.requests", mock)
mocker.patch("wandb.apis.internal_runqueue.requests", mock)
mocker.patch("wandb.apis.public.requests", mock)
mocker.patch("wandb.util.requests", mock)
mocker.patch("wandb.wandb_sdk.wandb_artifacts.requests", mock)
print("Patched requests everywhere", os.getpid())
return mock
def run(ctx):
if ctx["resume"]:
now = datetime.now()
created_at = (now - timedelta(days=1)).isoformat()
else:
created_at = datetime.now().isoformat()
stopped = ctx.get("stopped", False)
# for wandb_tests::wandb_restore_name_not_found
# if there is a fileName query, and this query is for nofile.h5
# return an empty file. otherwise, return the usual weights.h5
if ctx.get("graphql"):
fileNames = ctx["graphql"][-1]["variables"].get("fileNames")
else:
fileNames = None
if fileNames == ["nofile.h5"]:
fileNode = {
"id": "file123",
"name": "nofile.h5",
"sizeBytes": 0,
"md5": "0",
"url": request.url_root + "/storage?file=nofile.h5",
}
else:
fileNode = {
"id": "file123",
"name": ctx["requested_file"],
"sizeBytes": 20,
"md5": "XXX",
"url": request.url_root + "/storage?file=%s" % ctx["requested_file"],
"directUrl": request.url_root
+ "/storage?file=%s&direct=true" % ctx["requested_file"],
}
return {
"id": "test",
"name": "test",
"displayName": "beast-bug-33",
"state": "running",
"config": '{"epochs": {"value": 10}}',
"group": "A",
"jobType": "test",
"description": "",
"systemMetrics": '{"cpu": 100}',
"summaryMetrics": '{"acc": 100, "loss": 0}',
"fileCount": 1,
"history": [
'{"acc": 10, "loss": 90}',
'{"acc": 20, "loss": 80}',
'{"acc": 30, "loss": 70}',
],
"events": ['{"cpu": 10}', '{"cpu": 20}', '{"cpu": 30}'],
"files": {
# Special weights url by default, if requesting upload we set the name
"edges": [{"node": fileNode,}]
},
"sampledHistory": [[{"loss": 0, "acc": 100}, {"loss": 1, "acc": 0}]],
"shouldStop": False,
"failed": False,
"stopped": stopped,
"running": True,
"tags": [],
"notes": None,
"sweepName": None,
"createdAt": created_at,
"updatedAt": datetime.now().isoformat(),
}
def artifact(
ctx,
collection_name="mnist",
state="COMMITTED",
request_url_root="",
id_override=None,
):
_id = str(ctx["page_count"]) if id_override is None else id_override
return {
"id": _id,
"digest": "abc123",
"description": "",
"state": state,
"size": 10000,
"createdAt": datetime.now().isoformat(),
"updatedAt": datetime.now().isoformat(),
"versionIndex": ctx["page_count"],
"labels": [],
"metadata": "{}",
"aliases": [
{
"artifactCollectionName": collection_name,
"alias": "v%i" % ctx["page_count"],
}
],
"artifactSequence": {"name": collection_name,},
"currentManifest": {
"file": {
"directUrl": request_url_root
+ "/storage?file=wandb_manifest.json&id={}".format(_id)
}
},
}
def paginated(node, ctx, extra={}):
next_page = False
ctx["page_count"] += 1
if ctx["page_count"] < ctx["page_times"]:
next_page = True
edge = {"node": node, "cursor": "abc123"}
edge.update(extra)
return {
"edges": [edge],
"pageInfo": {"endCursor": "abc123", "hasNextPage": next_page},
}
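# Descriptive note: paginated() wraps a node in a single-edge GraphQL connection and
# keeps hasNextPage True until ctx["page_count"] reaches ctx["page_times"], which is
# how the mock server simulates multi-page listings.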
class CTX(object):
"""This is a silly threadsafe wrapper for getting ctx into the server
NOTE: This will stop working for live_mock_server if we make pytest run
in parallel.
"""
lock = threading.Lock()
STATE = None
def __init__(self, ctx):
self.ctx = ctx
def get(self):
return self.ctx
def set(self, ctx):
self.ctx = ctx
CTX.persist(self)
return self.ctx
@classmethod
def persist(cls, instance):
with cls.lock:
cls.STATE = instance.ctx
@classmethod
def load(cls, default):
with cls.lock:
if cls.STATE is not None:
return CTX(cls.STATE)
else:
return CTX(default)
def get_ctx():
if "ctx" not in g:
g.ctx = CTX.load(default_ctx())
return g.ctx.get()
def set_ctx(ctx):
get_ctx()
g.ctx.set(ctx)
def _bucket_config():
return {
"commit": "HEAD",
"github": "https://github.com/vanpelt",
"config": '{"foo":{"value":"bar"}}',
"files": {
"edges": [
{
"node": {
"directUrl": request.url_root
+ "/storage?file=wandb-metadata.json",
"name": "wandb-metadata.json",
}
},
{
"node": {
"directUrl": request.url_root + "/storage?file=diff.patch",
"name": "diff.patch",
}
},
]
},
}
class HttpException(Exception):
status_code = 500
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv["error"] = self.message
return rv
def create_app(user_ctx=None):
app = Flask(__name__)
# When starting in live mode, user_ctx is a fancy object
if isinstance(user_ctx, dict):
with app.app_context():
set_ctx(user_ctx)
@app.teardown_appcontext
def persist_ctx(exc):
if "ctx" in g:
CTX.persist(g.ctx)
@app.errorhandler(HttpException)
def handle_http_exception(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route("/ctx", methods=["GET", "PUT", "DELETE"])
def update_ctx():
"""Updating context for live_mock_server"""
ctx = get_ctx()
body = request.get_json()
if request.method == "GET":
return json.dumps(ctx)
elif request.method == "DELETE":
app.logger.info("reseting context")
set_ctx(default_ctx())
return json.dumps(get_ctx())
else:
ctx.update(body)
# TODO: tests in CI failed on this
set_ctx(ctx)
app.logger.info("updated context %s", ctx)
return json.dumps(get_ctx())
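    # Descriptive note: the handler below fakes the wandb GraphQL backend by matching
    # substrings of the incoming query/mutation text and returning canned payloads,
    # recording anything interesting (configs, summaries, artifacts) in ctx as it goes.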
@app.route("/graphql", methods=["POST"])
def graphql():
# TODO: in tests wandb-username is set to the test name, lets scope ctx to it
ctx = get_ctx()
test_name = request.headers.get("X-WANDB-USERNAME")
if test_name:
app.logger.info("Test request from: %s", test_name)
app.logger.info("graphql post")
if "fail_graphql_times" in ctx:
if ctx["fail_graphql_count"] < ctx["fail_graphql_times"]:
ctx["fail_graphql_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
if "rate_limited_times" in ctx:
if ctx["rate_limited_count"] < ctx["rate_limited_times"]:
ctx["rate_limited_count"] += 1
return json.dumps({"error": "rate limit exceeded"}), 429
body = request.get_json()
app.logger.info("graphql post body: %s", body)
if body["variables"].get("run"):
ctx["current_run"] = body["variables"]["run"]
if "mutation UpsertBucket(" in body["query"]:
param_config = body["variables"].get("config")
if param_config:
ctx.setdefault("config", []).append(json.loads(param_config))
param_summary = body["variables"].get("summaryMetrics")
if param_summary:
ctx.setdefault("summary", []).append(json.loads(param_summary))
ctx["upsert_bucket_count"] += 1
if body["variables"].get("files"):
requested_file = body["variables"]["files"][0]
ctx["requested_file"] = requested_file
url = request.url_root + "/storage?file={}&run={}".format(
urllib.parse.quote(requested_file), ctx["current_run"]
)
return json.dumps(
{
"data": {
"model": {
"bucket": {
"id": "storageid",
"files": {
"uploadHeaders": [],
"edges": [
{
"node": {
"name": requested_file,
"url": url,
"directUrl": url + "&direct=true",
}
}
],
},
}
}
}
}
)
if "historyTail" in body["query"]:
if ctx["resume"] is True:
hist_tail = '["{\\"_step\\": 15, \\"acc\\": 1, \\"_runtime\\": 60}"]'
return json.dumps(
{
"data": {
"model": {
"bucket": {
"name": "test",
"displayName": "funky-town-13",
"id": "test",
"config": '{"epochs": {"value": 10}}',
"summaryMetrics": '{"acc": 10, "best_val_loss": 0.5}',
"logLineCount": 14,
"historyLineCount": 15,
"eventsLineCount": 0,
"historyTail": hist_tail,
"eventsTail": '["{\\"_runtime\\": 70}"]',
}
}
}
}
)
else:
return json.dumps({"data": {"model": {"bucket": None}}})
if "query Runs(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"runCount": 4,
"readOnly": False,
"runs": paginated(run(ctx), ctx),
}
}
}
)
if "query Run(" in body["query"]:
return json.dumps({"data": {"project": {"run": run(ctx)}}})
if "query Model(" in body["query"]:
if "project(" in body["query"]:
project_field_name = "project"
run_field_name = "run"
else:
project_field_name = "model"
run_field_name = "bucket"
if "commit" in body["query"]:
run_config = _bucket_config()
else:
run_config = run(ctx)
return json.dumps(
{"data": {project_field_name: {run_field_name: run_config}}}
)
if "query Models(" in body["query"]:
return json.dumps(
{
"data": {
"models": {
"edges": [
{
"node": {
"id": "123",
"name": "myname",
"project": "myproj",
}
}
]
}
}
}
)
if "query Projects(" in body["query"]:
return json.dumps(
{
"data": {
"models": paginated(
{
"id": "1",
"name": "test-project",
"entityName": body["variables"]["entity"],
"createdAt": "now",
"isBenchmark": False,
},
ctx,
)
}
}
)
if "query Viewer " in body["query"]:
return json.dumps(
{
"data": {
"viewer": {
"entity": "mock_server_entity",
"flags": '{"code_saving_enabled": true}',
"teams": {
"edges": [] # TODO make configurable for cli_test
},
}
}
}
)
if "query Sweep(" in body["query"]:
return json.dumps(
{
"data": {
"project": {
"sweep": {
"id": "1234",
"name": "fun-sweep-10",
"state": "running",
"bestLoss": 0.33,
"config": yaml.dump(
{"metric": {"name": "loss", "value": "minimize"}}
),
"createdAt": datetime.now().isoformat(),
"heartbeatAt": datetime.now().isoformat(),
"updatedAt": datetime.now().isoformat(),
"earlyStopJobRunning": False,
"controller": None,
"scheduler": None,
"runs": paginated(run(ctx), ctx),
}
}
}
}
)
if "mutation UpsertSweep(" in body["query"]:
return json.dumps(
{
"data": {
"upsertSweep": {
"sweep": {
"name": "test",
"project": {
"id": "1234",
"name": "test",
"entity": {"id": "1234", "name": "test"},
},
}
}
}
}
)
if "mutation CreateAgent(" in body["query"]:
return json.dumps(
{"data": {"createAgent": {"agent": {"id": "mock-server-agent-93xy",}}}}
)
if "mutation Heartbeat(" in body["query"]:
return json.dumps(
{
"data": {
"agentHeartbeat": {
"agent": {"id": "mock-server-agent-93xy",},
"commands": json.dumps(
[
{
"type": "run",
"run_id": "mocker-sweep-run-x9",
"args": {"learning_rate": {"value": 0.99124}},
}
]
),
}
}
}
)
if "mutation UpsertBucket(" in body["query"]:
response = {
"data": {
"upsertBucket": {
"bucket": {
"id": "storageid",
"name": body["variables"].get("name", "abc123"),
"displayName": "lovely-dawn-32",
"project": {
"name": "test",
"entity": {"name": "mock_server_entity"},
},
},
"inserted": ctx["resume"] is False,
}
}
}
if body["variables"].get("name") == "mocker-sweep-run-x9":
response["data"]["upsertBucket"]["bucket"][
"sweepName"
] = "test-sweep-id"
return json.dumps(response)
if "mutation DeleteRun(" in body["query"]:
return json.dumps({"data": {}})
if "mutation CreateAnonymousApiKey " in body["query"]:
return json.dumps(
{
"data": {
"createAnonymousEntity": {"apiKey": {"name": "ANONYMOOSE" * 4}}
}
}
)
if "mutation DeleteFiles(" in body["query"]:
return json.dumps({"data": {"deleteFiles": {"success": True}}})
if "mutation PrepareFiles(" in body["query"]:
nodes = []
for i, file_spec in enumerate(body["variables"]["fileSpecs"]):
url = request.url_root + "/storage?file=%s" % file_spec["name"]
nodes.append(
{
"node": {
"id": str(i),
"name": file_spec["name"],
"displayName": file_spec["name"],
"digest": "null",
"uploadUrl": url,
"uploadHeaders": "",
}
}
)
return json.dumps({"data": {"prepareFiles": {"files": {"edges": nodes}}}})
if "mutation CreateArtifact(" in body["query"]:
collection_name = body["variables"]["artifactCollectionNames"][0]
ctx["artifacts"] = ctx.get("artifacts", {})
ctx["artifacts"][collection_name] = ctx["artifacts"].get(
collection_name, []
)
ctx["artifacts"][collection_name].append(body["variables"])
_id = body.get("variables", {}).get("digest", "")
if _id != "":
ctx.get("artifacts_by_id")[_id] = body["variables"]
return {
"data": {
"createArtifact": {
"artifact": artifact(
ctx,
collection_name,
id_override=_id,
state="COMMITTED"
if "PENDING" not in collection_name
else "PENDING",
)
}
}
}
if "mutation CreateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": request.url_root
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": request.url_root + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
ctx["manifests_created"].append(manifest)
return {"data": {"createArtifactManifest": {"artifactManifest": manifest,}}}
if "mutation UpdateArtifactManifest(" in body["query"]:
manifest = {
"id": 1,
"type": "INCREMENTAL"
if "incremental" in body.get("variables", {}).get("name", "")
else "FULL",
"file": {
"id": 1,
"directUrl": request.url_root
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
"uploadUrl": request.url_root + "/storage?file=wandb_manifest.json",
"uploadHeaders": "",
},
}
return {"data": {"updateArtifactManifest": {"artifactManifest": manifest,}}}
if "mutation CreateArtifactFiles" in body["query"]:
return {
"data": {
"files": [
{
"node": {
"id": idx,
"name": file["name"],
"uploadUrl": "",
"uploadheaders": [],
"artifact": {"id": file["artifactID"]},
}
for idx, file in enumerate(
body["variables"]["artifactFiles"]
)
}
],
}
}
if "mutation CommitArtifact(" in body["query"]:
return {
"data": {
"commitArtifact": {
"artifact": {"id": 1, "digest": "0000===================="}
}
}
}
if "mutation UseArtifact(" in body["query"]:
return {"data": {"useArtifact": {"artifact": artifact(ctx)}}}
if "query ProjectArtifactType(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
}
}
}
}
if "query ProjectArtifacts(" in body["query"]:
return {
"data": {
"project": {
"artifactTypes": paginated(
{
"id": "1",
"name": "dataset",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
if "query ProjectArtifactCollections(" in body["query"]:
return {
"data": {
"project": {
"artifactType": {
"artifactSequences": paginated(
{
"id": "1",
"name": "mnist",
"description": "",
"createdAt": datetime.now().isoformat(),
},
ctx,
)
}
}
}
}
if "query RunArtifacts(" in body["query"]:
if "inputArtifacts" in body["query"]:
key = "inputArtifacts"
else:
key = "outputArtifacts"
artifacts = paginated(artifact(ctx), ctx)
artifacts["totalCount"] = ctx["page_times"]
return {"data": {"project": {"run": {key: artifacts}}}}
if "query Artifacts(" in body["query"]:
version = "v%i" % ctx["page_count"]
artifacts = paginated(artifact(ctx), ctx, {"version": version})
artifacts["totalCount"] = ctx["page_times"]
return {
"data": {
"project": {
"artifactType": {
"artifactSequence": {
"name": "mnist",
"artifacts": artifacts,
}
}
}
}
}
if "query Artifact(" in body["query"]:
art = artifact(ctx, request_url_root=request.url_root)
if "id" in body.get("variables", {}):
art = artifact(
ctx,
request_url_root=request.url_root,
id_override=body.get("variables", {}).get("id"),
)
art["artifactType"] = {"id": 1, "name": "dataset"}
return {"data": {"artifact": art}}
# code artifacts use source-RUNID names, we return the code type
art["artifactType"] = {"id": 2, "name": "code"}
if "source" not in body["variables"]["name"]:
art["artifactType"] = {"id": 1, "name": "dataset"}
if "logged_table" in body["variables"]["name"]:
art["artifactType"] = {"id": 3, "name": "run_table"}
if "run-" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "run_table"}
if "wb_validation_data" in body["variables"]["name"]:
art["artifactType"] = {"id": 4, "name": "validation_dataset"}
return {"data": {"project": {"artifact": art}}}
if "query ArtifactManifest(" in body["query"]:
art = artifact(ctx)
art["currentManifest"] = {
"id": 1,
"file": {
"id": 1,
"directUrl": request.url_root
+ "/storage?file=wandb_manifest.json&name={}".format(
body.get("variables", {}).get("name", "")
),
},
}
return {"data": {"project": {"artifact": art}}}
if "stopped" in body["query"]:
return json.dumps(
{
"data": {
"Model": {
"project": {"run": {"stopped": ctx.get("stopped", False)}}
}
}
}
)
print("MISSING QUERY, add me to tests/mock_server.py", body["query"])
error = {"message": "Not implemented in tests/mock_server.py", "body": body}
return json.dumps({"errors": [error]})
@app.route("/storage", methods=["PUT", "GET"])
def storage():
ctx = get_ctx()
if "fail_storage_times" in ctx:
if ctx["fail_storage_count"] < ctx["fail_storage_times"]:
ctx["fail_storage_count"] += 1
return json.dumps({"errors": ["Server down"]}), 500
file = request.args.get("file")
_id = request.args.get("id", "")
run = request.args.get("run", "unknown")
ctx["storage"] = ctx.get("storage", {})
ctx["storage"][run] = ctx["storage"].get(run, [])
ctx["storage"][run].append(request.args.get("file"))
size = ctx["files"].get(request.args.get("file"))
if request.method == "GET" and size:
return os.urandom(size), 200
# make sure to read the data
request.get_data()
if request.method == "PUT":
curr = ctx["file_bytes"].get(file)
if curr is None:
ctx["file_bytes"].setdefault(file, 0)
ctx["file_bytes"][file] += request.content_length
else:
ctx["file_bytes"][file] += request.content_length
if file == "wandb_manifest.json":
if _id in ctx.get("artifacts_by_id"):
art = ctx["artifacts_by_id"][_id]
if "-validation_predictions" in art["artifactCollectionNames"][0]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_predictions.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
if "wb_validation_data" in art["artifactCollectionNames"][0]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_data.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"media/tables/5aac4cea.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
},
}
if request.args.get("name") == "my-test_reference_download:latest":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"StarWars3.wav": {
"digest": "a90eb05f7aef652b3bdd957c67b7213a",
"size": 81299,
"ref": "https://wandb-artifacts-refs-public-test.s3-us-west-2.amazonaws.com/StarWars3.wav",
},
"file1.txt": {
"digest": "0000====================",
"size": 81299,
},
},
}
elif (
_id == "bb8043da7d78ff168a695cff097897d2"
or _id == "ad4d74ac0e4167c6cf4aaad9d59b9b44"
):
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"t1.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "b89758a7e7503bdb021e0534fe444d9a":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id == "b9a598178557aed1d89bd93ec0db989b":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"logged_table_2.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
}
},
}
elif _id in [
"2d9a7e0aa8407f0730e19e5bc55c3a45",
"c541de19b18331a4a33b282fc9d42510",
"6f3d6ed5417d2955afbc73bff0ed1609",
"7d797e62834a7d72538529e91ed958e2",
"03d3e221fd4da6c5fccb1fbd75fe475e",
"464aa7e0d7c3f8230e3fe5f10464a2e6",
"8ef51aeabcfcd89b719822de64f6a8bf",
]:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"validation_data.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"media/tables/e14239fe.table.json": {
"digest": "3aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
},
}
elif (
len(ctx.get("graphql", [])) >= 3
and ctx["graphql"][2].get("variables", {}).get("name", "") == "dummy:v0"
) or request.args.get("name") == "dummy:v0":
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"dataset.partitioned-table.json": {
"digest": "0aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"parts/1.table.json": {
"digest": "1aaaaaaaaaaaaaaaaaaaaa==",
"size": 81299,
},
"t.table.json": {
"digest": "2aaaaaaaaaaaaaaaaaaaaa==",
"size": 123,
},
},
}
else:
return {
"version": 1,
"storagePolicy": "wandb-storage-policy-v1",
"storagePolicyConfig": {},
"contents": {
"digits.h5": {
"digest": "TeSJ4xxXg0ohuL5xEdq2Ew==",
"size": 81299,
},
},
}
elif file == "wandb-metadata.json":
return {
"docker": "test/docker",
"program": "train.py",
"args": ["--test", "foo"],
"git": ctx.get("git", {}),
}
elif file == "diff.patch":
# TODO: make sure the patch is valid for windows as well,
# and un skip the test in test_cli.py
return r"""
diff --git a/patch.txt b/patch.txt
index 30d74d2..9a2c773 100644
--- a/patch.txt
+++ b/patch.txt
@@ -1 +1 @@
-test
\ No newline at end of file
+testing
\ No newline at end of file
"""
return "", 200
@app.route("/artifacts/<entity>/<digest>", methods=["GET", "POST"])
def artifact_file(entity, digest):
if entity == "entity":
if (
digest == "d1a69a69a69a69a69a69a69a69a69a69"
): # "dataset.partitioned-table.json"
return (
json.dumps({"_type": "partitioned-table", "parts_path": "parts"}),
200,
)
elif digest == "d5a69a69a69a69a69a69a69a69a69a69": # "parts/1.table.json"
return (
json.dumps(
{
"_type": "table",
"column_types": {
"params": {
"type_map": {
"A": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
"B": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
"C": {
"params": {
"allowed_types": [
{"wb_type": "none"},
{"wb_type": "number"},
]
},
"wb_type": "union",
},
}
},
"wb_type": "dictionary",
},
"columns": ["A", "B", "C"],
"data": [[0, 0, 1]],
"ncols": 3,
"nrows": 1,
}
),
200,
)
elif digest == "d9a69a69a69a69a69a69a69a69a69a69": # "t.table.json"
return (
json.dumps(
{
"_type": "table",
"column_types": {
"params": {"type_map": {}},
"wb_type": "dictionary",
},
"columns": [],
"data": [],
"ncols": 0,
"nrows": 0,
}
),
200,
)
if digest == "dda69a69a69a69a69a69a69a69a69a69":
return (
json.dumps({"_type": "table-file", "columns": [], "data": []}),
200,
)
return "ARTIFACT %s" % digest, 200
@app.route("/files/<entity>/<project>/<run>/file_stream", methods=["POST"])
def file_stream(entity, project, run):
ctx = get_ctx()
ctx["file_stream"] = ctx.get("file_stream", [])
ctx["file_stream"].append(request.get_json())
response = json.dumps({"exitcode": None, "limits": {}})
inject = InjectRequestsParse(ctx).find(request=request)
if inject:
if inject.response:
response = inject.response
if inject.http_status:
# print("INJECT", inject, inject.http_status)
raise HttpException("some error", status_code=inject.http_status)
return response
@app.route("/api/v1/namespaces/default/pods/test")
def k8s_pod():
ctx = get_ctx()
image_id = b"docker-pullable://test@sha256:1234"
ms = b'{"status":{"containerStatuses":[{"imageID":"%s"}]}}' % image_id
if ctx.get("k8s"):
return ms, 200
else:
return b"", 500
@app.route("/api/sessions")
def jupyter_sessions():
return json.dumps(
[
{
"kernel": {"id": "12345"},
"notebook": {"path": "test.ipynb", "name": "test.ipynb"},
}
]
)
@app.route("/wandb_url", methods=["PUT"])
def spell_url():
ctx = get_ctx()
ctx["spell_data"] = request.get_json()
return json.dumps({"success": True})
@app.route("/pypi/<library>/json")
def pypi(library):
version = getattr(wandb, "__hack_pypi_latest_version__", wandb.__version__)
return json.dumps(
{
"info": {"version": version},
"releases": {
"88.1.2rc2": [],
"88.1.2rc12": [],
"88.1.2rc3": [],
"88.1.2rc4": [],
"0.0.8rc6": [],
"0.0.8rc2": [],
"0.0.8rc3": [],
"0.0.8rc8": [],
"0.0.2": [{"yanked": True}],
"0.0.3": [{"yanked": True, "yanked_reason": "just cuz"}],
"0.0.7": [],
"0.0.5": [],
"0.0.6": [],
},
}
)
@app.errorhandler(404)
def page_not_found(e):
print("Got request to: %s (%s)" % (request.url, request.method))
return "Not Found", 404
return app
class ParseCTX(object):
def __init__(self, ctx):
self._ctx = ctx
def get_filestream_file_updates(self):
data = {}
file_stream_updates = self._ctx["file_stream"]
for update in file_stream_updates:
files = update.get("files")
if not files:
continue
for k, v in six.iteritems(files):
data.setdefault(k, []).append(v)
return data
def get_filestream_file_items(self):
data = {}
fs_file_updates = self.get_filestream_file_updates()
for k, v in six.iteritems(fs_file_updates):
l = []
for d in v:
offset = d.get("offset")
content = d.get("content")
assert offset is not None
assert content is not None
assert offset == 0 or offset == len(l), (k, v, l, d)
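                # An offset of 0 marks a restarted stream for this file, so the
                # lines collected so far are dropped below; otherwise each chunk
                # must start exactly where the previous one ended.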
if not offset:
l = []
if k == u"output.log":
lines = [content]
else:
lines = map(json.loads, content)
l.extend(lines)
data[k] = l
return data
@property
def summary(self):
fs_files = self.get_filestream_file_items()
summary = fs_files["wandb-summary.json"][-1]
return summary
@property
def history(self):
fs_files = self.get_filestream_file_items()
history = fs_files["wandb-history.jsonl"]
return history
@property
def config(self):
return self._ctx["config"][-1]
@property
def config_wandb(self):
return self.config["_wandb"]["value"]
@property
def telemetry(self):
return self.config.get("_wandb", {}).get("value", {}).get("t")
@property
def metrics(self):
return self.config.get("_wandb", {}).get("value", {}).get("m")
@property
def manifests_created(self):
return self._ctx.get("manifests_created") or []
if __name__ == "__main__":
app = create_app()
app.logger.setLevel(logging.INFO)
app.run(debug=False, port=int(os.environ.get("PORT", 8547)))
|
py
|
1a59f630556f6c11df96b3319956c105a60853c3
|
# pylint: disable=redefined-outer-name
# start_marker
import pandas as pd
from dagster import AssetKey
from dagster.core.asset_defs import ForeignAsset, asset
from pandas import DataFrame
sfo_q2_weather_sample = ForeignAsset(key=AssetKey("sfo_q2_weather_sample"))
@asset
def daily_temperature_highs(sfo_q2_weather_sample: DataFrame) -> DataFrame:
"""Computes the temperature high for each day"""
sfo_q2_weather_sample["valid_date"] = pd.to_datetime(sfo_q2_weather_sample["valid"])
return sfo_q2_weather_sample.groupby("valid_date").max().rename(columns={"tmpf": "max_tmpf"})
@asset
def hottest_dates(daily_temperature_highs: DataFrame) -> DataFrame:
"""Computes the 10 hottest dates"""
return daily_temperature_highs.nlargest(10, "max_tmpf")
# end_marker
|
py
|
1a59f67d4f9e03d8771011d16f7ca58521ec728b
|
# Copyright 2019 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from impala.dbapi import connect
class TestHttpConnect(object):
def test_simple_connect(self):
con = connect("localhost", 28000, use_http_transport=True)
cur = con.cursor()
cur.execute('select 1')
rows = cur.fetchall()
assert rows == [(1,)]
|
py
|
1a59f6d79c55eed0092626f25454d4e55c81d6db
|
# -*- coding: utf-8 -*-
import datetime
import logging
import os.path
import sqlite3
import string
from nmj.tables import ALL_TABLES, DbVersion, ScanDirs, ScanSystem, ShowGroups
_LOGGER = logging.getLogger(__name__)
INDEXES = [
"CREATE INDEX IDX_PHOTOS_TITLE ON PHOTOS(TITLE ASC);",
"CREATE INDEX IDX_PHOTOS_SEARCH_TITLE ON PHOTOS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_PHOTO_ALBUMS_PHOTOS_PHOTO_ALBUMS_ID ON PHOTO_ALBUMS_PHOTOS(PHOTO_ALBUMS_ID ASC);",
"CREATE INDEX IDX_PHOTO_ALBUMS_PHOTOS_PHOTOS_ID ON PHOTO_ALBUMS_PHOTOS(PHOTOS_ID ASC);",
"CREATE INDEX IDX_PHOTO_DATE_CAPTURE_TIME ON PHOTO_DATE(CAPTURE_TIME ASC);",
"CREATE INDEX IDX_SHOWS_CONTENT_TTID ON SHOWS(CONTENT_TTID ASC);",
"CREATE INDEX IDX_SHOWS_TITLE ON SHOWS(TITLE ASC);",
"CREATE INDEX IDX_SHOWS_SEARCH_TITLE ON SHOWS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_SHOWS_YEAR ON SHOWS(YEAR ASC);",
"CREATE INDEX IDX_SHOWS_RATING ON SHOWS(RATING ASC);",
"CREATE INDEX IDX_SHOWS_PARENTAL_CONTROL ON SHOWS(PARENTAL_CONTROL ASC);",
"CREATE INDEX IDX_SONGS_TITLE ON SONGS(TITLE ASC);",
"CREATE INDEX IDX_SONGS_SEARCH_TITLE ON SONGS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_SONGS_RATING ON SONGS(RATING ASC);",
"CREATE INDEX IDX_SONGS_RELEASE_DATE ON SONGS(RELEASE_DATE ASC);",
"CREATE INDEX IDX_SONG_ALBUMS_TITLE ON SONG_ALBUMS(TITLE ASC);",
"CREATE INDEX IDX_SONG_ALBUMS_SEARCH_TITLE ON SONG_ALBUMS(SEARCH_TITLE ASC);",
"CREATE INDEX IDX_SONG_ALBUMS_RELEASE_DATE ON SONG_ALBUMS(RELEASE_DATE ASC);",
"CREATE INDEX IDX_SONG_ALBUM_SONGS_ALBUMS_ID ON SONG_ALBUMS_SONGS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_SONG_ALBUM_SONGS_SONGS_ID ON SONG_ALBUMS_SONGS(SONGS_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONGS_GENRES_ID ON SONG_GENRES_SONGS(GENRES_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONGS_SONGS_ID ON SONG_GENRES_SONGS(SONGS_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONG_ALBUMS_ALBUMS_ID ON SONG_GENRES_SONG_ALBUMS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_SONG_GENRES_SONG_ALBUMS_GENRES_ID ON SONG_GENRES_SONG_ALBUMS(GENRES_ID ASC);",
"CREATE INDEX IDX_SONG_GROUPS_SONG_ALBUMS_GROUPS_ID ON SONG_GROUPS_SONG_ALBUMS(GROUPS_ID ASC);",
"CREATE INDEX IDX_SONG_GROUPS_SONG_ALBUMS_ALBUMS_ID ON SONG_GROUPS_SONG_ALBUMS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONGS_PERSONS_ID ON SONG_PERSONS_SONGS(PERSONS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONGS_SONGS_ID ON SONG_PERSONS_SONGS(SONGS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONG_ALBUMS_PERSONS_ID ON SONG_PERSONS_SONG_ALBUMS(PERSONS_ID ASC);",
"CREATE INDEX IDX_SONG_PERSONS_SONG_ALBUMS_ALBUMS_ID ON SONG_PERSONS_SONG_ALBUMS(ALBUMS_ID ASC);",
"CREATE INDEX IDX_VIDEO_SUBTITLES_VIDEOS_ID ON VIDEO_SUBTITLES(VIDEOS_ID ASC);",
]
class DBProxy(object):
isolation_level = "DEFERRED"
def __init__(self, root_path, popcorn_path=""):
self.root_path = root_path
self.popcorn_path = popcorn_path
self.media_db_path = os.path.join(root_path, "nmj_database", "media.db")
if not os.path.isfile(self.media_db_path):
self.create()
self.connection, self.cursor = self.get_connection_and_cursor()
def get_connection_and_cursor(self):
if not os.path.isdir(os.path.join(self.root_path, "nmj_database")):
os.makedirs(os.path.dirname(self.media_db_path))
connection = sqlite3.connect(self.media_db_path)
connection.isolation_level = self.isolation_level
connection.text_factory = str
cursor = connection.cursor()
return connection, cursor
def create(self):
_LOGGER.info("Creating database...")
connection, cursor = self.get_connection_and_cursor()
for table in ALL_TABLES:
_LOGGER.debug("create table %s", table)
table().create(cursor)
DbVersion.insert(cursor, version="2.0.0")
ScanDirs.insert(cursor, directory="", name=self.popcorn_path, scan_time="", size=1807172, category=3, status=3)
ScanSystem.insert(cursor, type="RUNNING_STATUS", value="0")
ScanSystem.insert(cursor, type="HISTORY_SCAN_VIDEOS", value=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), custom1="1", custom2="89", custom3="0")
for group in ["0-9",] + [letter for letter in string.ascii_uppercase]:
ShowGroups.insert(cursor, name=group, language="FR")
for request in INDEXES:
cursor.execute(request)
connection.commit()
cursor.close()
connection.close()
_LOGGER.info("Database creation done")
def contains(self, table, **kwargs):
items = self.get_tables_items(table, **kwargs)
return bool(items)
def get_first(self, table, **kwargs):
try:
return self.get_tables_items(table, **kwargs)[0]
except IndexError:
return None
def get_tables_items(self, *tables, **kwargs):
result = []
for table in tables:
try:
result += table.load(self.cursor, **kwargs)
            except Exception:
_LOGGER.exception("Getting items in table %s", table)
return result
def insert(self, table, **kwargs):
return table.insert(self.cursor, **kwargs)
def commit(self):
self.connection.commit()
def delete(self, to_remove):
to_remove.delete(self.cursor)
def update(self, table, item_id, **kwargs):
item = self.get_tables_items(table, id=item_id)[0]
item.update(self.cursor, **kwargs)
self.commit()
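# Usage sketch (hypothetical paths and values; assumes the table classes'
# load() accepts the same keyword filters already relied on by update() above).
# Guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    db = DBProxy("/tmp/nmj_root", popcorn_path="/share/videos")  # hypothetical root
    db.insert(ScanSystem, type="DEMO_FLAG", value="1")  # same kwargs style as create()
    db.commit()
    print(db.contains(ScanSystem, type="DEMO_FLAG"))
    print(db.get_first(ScanSystem, type="DEMO_FLAG"))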
|
py
|
1a59f70184d6b5234443319edf57226e9d973c7c
|
# -*- coding: utf-8 -*-
# encoding: utf-8
# coding: utf-8
from __future__ import absolute_import, print_function, unicode_literals # NOQA
import sys
from os import path
import logging
logging.basicConfig(level=logging.WARNING, stream=sys.stderr) # TODO: FIXME: no log output!?
DIR=path.dirname( path.dirname( path.abspath(__file__) ) )
sys.path.append(DIR)
sys.path.append(path.join(DIR, 'hilbert_config'))
from helpers import *
from hilbert_cli_config import *
from subcmdparser import *
import pytest # NOQA
def load(s):
return load_yaml(s)
class TestLoad:
def test_1(self, capsys):
out, err = capsys.readouterr()
load('{a, b, a}')
out, err = capsys.readouterr()
# with capsys.disabled():
assert err == ''
assert out == """\
WARNING: Key re-definition within some mapping:
K[line: 1, column: 2]: Previous Value:
a: None
↑
---
K[line: 1, column: 8]: New Value:
a: None
↑
---
===
"""
def test_2(self, capsys):
out, err = capsys.readouterr()
load("""{ ? a, ? b, ? a }""")
out, err = capsys.readouterr()
assert err == ''
assert out == """\
WARNING: Key re-definition within some mapping:
K[line: 1, column: 5]: Previous Value:
a: None
↑
---
K[line: 1, column: 15]: New Value:
a: None
↑
---
===
"""
|
py
|
1a59f7d8d8666012d303866f4a1a0205c86b6c94
|
#!/usr/bin/env python3
import telnetlib
import time
# yum install python3 (centos7.9)
# IP address and port to probe
Host = '192.168.89.135'
Port = '22'
def do_telnet(Host, Port):
try:
tn = telnetlib.Telnet(Host, Port, timeout=5)
tn.close()
    except Exception:
return False
return True
while True:
time.sleep(5)
res = do_telnet(Host, Port)
print(str(Host) + ':' + str(Port) + ' ' + str(res))
|
py
|
1a59f7e63c2a01730b8cc4b91f3885e3aa00945e
|
#!/usr/bin/env python
# encoding: utf-8
def run(whatweb, pluginname):
whatweb.recog_from_file(pluginname, "admin/template/article_more/config.htm", "width")
whatweb.recog_from_file(pluginname, "robots.txt", "qibo")
|
py
|
1a59f94e7e7776f008357427b0b4320743e5b8c4
|
from sainpse import __version__
def test_version():
assert __version__ == '0.1.0'
|
py
|
1a59fac52a3433ddc58c2fc4642cc4d03948df15
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import lsp_tlv_entry
class lsp_entry(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-isis-operational - based on the path /isis-state/database/lsp-entry. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: ISIS LSP MO
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__lsp_level','__lsp_id','__lsp_seq_no','__lsp_checksum','__lsp_holdtime','__lsp_att','__lsp_p','__lsp_ol','__lsp_tlv_entry',)
_yang_name = 'lsp-entry'
_rest_name = 'lsp-entry'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__lsp_tlv_entry = YANGDynClass(base=YANGListType("type",lsp_tlv_entry.lsp_tlv_entry, yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}), is_container='list', yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
self.__lsp_ol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-ol", rest_name="lsp-ol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__lsp_level = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-level", rest_name="lsp-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__lsp_checksum = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="lsp-checksum", rest_name="lsp-checksum", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)
self.__lsp_p = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-p", rest_name="lsp-p", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__lsp_att = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-att", rest_name="lsp-att", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__lsp_holdtime = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-holdtime", rest_name="lsp-holdtime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
self.__lsp_id = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
self.__lsp_seq_no = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-seq-no", rest_name="lsp-seq-no", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'isis-state', u'database', u'lsp-entry']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'isis-state', u'database', u'lsp-entry']
def _get_lsp_level(self):
"""
Getter method for lsp_level, mapped from YANG variable /isis_state/database/lsp_entry/lsp_level (uint32)
YANG Description: IS-Level
"""
return self.__lsp_level
def _set_lsp_level(self, v, load=False):
"""
Setter method for lsp_level, mapped from YANG variable /isis_state/database/lsp_entry/lsp_level (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_level is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_level() directly.
YANG Description: IS-Level
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-level", rest_name="lsp-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_level must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-level", rest_name="lsp-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_level = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_level(self):
self.__lsp_level = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-level", rest_name="lsp-level", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_lsp_id(self):
"""
Getter method for lsp_id, mapped from YANG variable /isis_state/database/lsp_entry/lsp_id (string)
YANG Description: LSP ID
"""
return self.__lsp_id
def _set_lsp_id(self, v, load=False):
"""
Setter method for lsp_id, mapped from YANG variable /isis_state/database/lsp_entry/lsp_id (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_id() directly.
YANG Description: LSP ID
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_id must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)""",
})
self.__lsp_id = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_id(self):
self.__lsp_id = YANGDynClass(base=unicode, is_leaf=True, yang_name="lsp-id", rest_name="lsp-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='string', is_config=False)
def _get_lsp_seq_no(self):
"""
Getter method for lsp_seq_no, mapped from YANG variable /isis_state/database/lsp_entry/lsp_seq_no (uint32)
YANG Description: Seq No
"""
return self.__lsp_seq_no
def _set_lsp_seq_no(self, v, load=False):
"""
Setter method for lsp_seq_no, mapped from YANG variable /isis_state/database/lsp_entry/lsp_seq_no (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_seq_no is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_seq_no() directly.
YANG Description: Seq No
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-seq-no", rest_name="lsp-seq-no", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_seq_no must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-seq-no", rest_name="lsp-seq-no", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_seq_no = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_seq_no(self):
self.__lsp_seq_no = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-seq-no", rest_name="lsp-seq-no", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_lsp_checksum(self):
"""
Getter method for lsp_checksum, mapped from YANG variable /isis_state/database/lsp_entry/lsp_checksum (uint16)
YANG Description: Checksum
"""
return self.__lsp_checksum
def _set_lsp_checksum(self, v, load=False):
"""
Setter method for lsp_checksum, mapped from YANG variable /isis_state/database/lsp_entry/lsp_checksum (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_checksum is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_checksum() directly.
YANG Description: Checksum
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="lsp-checksum", rest_name="lsp-checksum", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_checksum must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="lsp-checksum", rest_name="lsp-checksum", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)""",
})
self.__lsp_checksum = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_checksum(self):
self.__lsp_checksum = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="lsp-checksum", rest_name="lsp-checksum", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)
def _get_lsp_holdtime(self):
"""
Getter method for lsp_holdtime, mapped from YANG variable /isis_state/database/lsp_entry/lsp_holdtime (uint32)
YANG Description: HoldTime
"""
return self.__lsp_holdtime
def _set_lsp_holdtime(self, v, load=False):
"""
Setter method for lsp_holdtime, mapped from YANG variable /isis_state/database/lsp_entry/lsp_holdtime (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_holdtime is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_holdtime() directly.
YANG Description: HoldTime
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-holdtime", rest_name="lsp-holdtime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_holdtime must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-holdtime", rest_name="lsp-holdtime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_holdtime = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_holdtime(self):
self.__lsp_holdtime = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-holdtime", rest_name="lsp-holdtime", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_lsp_att(self):
"""
Getter method for lsp_att, mapped from YANG variable /isis_state/database/lsp_entry/lsp_att (uint32)
YANG Description: ATT
"""
return self.__lsp_att
def _set_lsp_att(self, v, load=False):
"""
Setter method for lsp_att, mapped from YANG variable /isis_state/database/lsp_entry/lsp_att (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_att is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_att() directly.
YANG Description: ATT
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-att", rest_name="lsp-att", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_att must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-att", rest_name="lsp-att", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_att = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_att(self):
self.__lsp_att = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-att", rest_name="lsp-att", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_lsp_p(self):
"""
Getter method for lsp_p, mapped from YANG variable /isis_state/database/lsp_entry/lsp_p (uint32)
YANG Description: LSP Flag P
"""
return self.__lsp_p
def _set_lsp_p(self, v, load=False):
"""
Setter method for lsp_p, mapped from YANG variable /isis_state/database/lsp_entry/lsp_p (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_p is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_p() directly.
YANG Description: LSP Flag P
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-p", rest_name="lsp-p", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_p must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-p", rest_name="lsp-p", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_p = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_p(self):
self.__lsp_p = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-p", rest_name="lsp-p", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_lsp_ol(self):
"""
Getter method for lsp_ol, mapped from YANG variable /isis_state/database/lsp_entry/lsp_ol (uint32)
YANG Description: LSP OL
"""
return self.__lsp_ol
def _set_lsp_ol(self, v, load=False):
"""
Setter method for lsp_ol, mapped from YANG variable /isis_state/database/lsp_entry/lsp_ol (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_ol is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_ol() directly.
YANG Description: LSP OL
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-ol", rest_name="lsp-ol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_ol must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-ol", rest_name="lsp-ol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
})
self.__lsp_ol = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_ol(self):
self.__lsp_ol = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="lsp-ol", rest_name="lsp-ol", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
def _get_lsp_tlv_entry(self):
"""
Getter method for lsp_tlv_entry, mapped from YANG variable /isis_state/database/lsp_entry/lsp_tlv_entry (list)
YANG Description: ISIS LSP TLV
"""
return self.__lsp_tlv_entry
def _set_lsp_tlv_entry(self, v, load=False):
"""
Setter method for lsp_tlv_entry, mapped from YANG variable /isis_state/database/lsp_entry/lsp_tlv_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_tlv_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_tlv_entry() directly.
YANG Description: ISIS LSP TLV
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("type",lsp_tlv_entry.lsp_tlv_entry, yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}), is_container='list', yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_tlv_entry must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("type",lsp_tlv_entry.lsp_tlv_entry, yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}), is_container='list', yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)""",
})
self.__lsp_tlv_entry = t
if hasattr(self, '_set'):
self._set()
def _unset_lsp_tlv_entry(self):
self.__lsp_tlv_entry = YANGDynClass(base=YANGListType("type",lsp_tlv_entry.lsp_tlv_entry, yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='type', extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}), is_container='list', yang_name="lsp-tlv-entry", rest_name="lsp-tlv-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-lsp-tlv', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='list', is_config=False)
lsp_level = __builtin__.property(_get_lsp_level)
lsp_id = __builtin__.property(_get_lsp_id)
lsp_seq_no = __builtin__.property(_get_lsp_seq_no)
lsp_checksum = __builtin__.property(_get_lsp_checksum)
lsp_holdtime = __builtin__.property(_get_lsp_holdtime)
lsp_att = __builtin__.property(_get_lsp_att)
lsp_p = __builtin__.property(_get_lsp_p)
lsp_ol = __builtin__.property(_get_lsp_ol)
lsp_tlv_entry = __builtin__.property(_get_lsp_tlv_entry)
_pyangbind_elements = {'lsp_level': lsp_level, 'lsp_id': lsp_id, 'lsp_seq_no': lsp_seq_no, 'lsp_checksum': lsp_checksum, 'lsp_holdtime': lsp_holdtime, 'lsp_att': lsp_att, 'lsp_p': lsp_p, 'lsp_ol': lsp_ol, 'lsp_tlv_entry': lsp_tlv_entry, }
|
py
|
1a59fb0b7e9c16a18fb6110a61cfe6c331e3f928
|
from django.test import TestCase
from exam.forms import UpdateCustomExamForm
from exam.models import CustomExam
from user.models import HealthProfessional
class TestCreateCustomExamForm(TestCase):
def setUp(self):
self.my_view = UpdateCustomExamForm()
self.name_min = None
self.name_valid = "Alguma coisa"
self.name_exists = "Invalido"
self.description_max = """adjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihai
ufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuah
adjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdha
ihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhf
iahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiu
hfiuahfidhfiahiudfhaiufhdiuah"""
self.name_max = """adjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihai
ufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuah
adjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdha
ihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhf
iahdfiahdufhaisdhfiuahdfuihaiufdhaihfiuhfiuahfidhfiahiudfhaiufhdiuahadjhfiahdfiahdufhaisdhfiuahdfuihaiufdhaihfiu
hfiuahfidhfiahiudfhaiufhdiuah"""
self.description_min = "as"
self.description_valid = "Examina alguma coisa"
custom_exam = CustomExam()
custom_exam.name = "Invalido"
user = HealthProfessional()
user.crm = "54321"
user.save()
custom_exam.health_professional_FK = user
custom_exam.pk = 1
custom_exam.save()
def test_valid(self):
form_data = {'name': self.name_valid,
'description': self.description_valid
}
form = UpdateCustomExamForm(data=form_data)
form.get_pk(1)
self.assertTrue(form.is_valid())
def test_invalid_max_name(self):
form_data = {'name': self.name_max,
'description': self.description_valid
}
form = UpdateCustomExamForm(data=form_data)
form.get_pk(1)
self.assertFalse(form.is_valid())
def test_invalid_min_name(self):
form_data = {'name': self.name_min,
'description': self.description_valid
}
form = UpdateCustomExamForm(data=form_data)
form.get_pk(1)
self.assertFalse(form.is_valid())
def test_invalid_max_description(self):
form_data = {'name': self.name_valid,
'description': self.description_max
}
form = UpdateCustomExamForm(data=form_data)
form.get_pk(1)
self.assertFalse(form.is_valid())
def test_invalid_min_description(self):
form_data = {'name': self.name_valid,
'description': self.description_min
}
form = UpdateCustomExamForm(data=form_data)
form.get_pk(1)
self.assertFalse(form.is_valid())
|
py
|
1a59fb277ad34d1926f99250065fea340b3e3a7b
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Flask-Resources is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Flask Resources module to create REST APIs."""
from werkzeug.exceptions import HTTPException
from ..context import resource_requestctx
from ..parsers import search_request_parser
from .base import BaseView
class ListView(BaseView):
"""List view representation.
Allows searching and creating an item in the list.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(ListView, self).__init__(*args, **kwargs)
# is defined by response_handler decorator
self.response_handler = None
# is defined by response_loader decorator
self.request_loader = None
def get(self, *args, **kwargs):
"""Search the collection."""
resource_requestctx.update(self.request_loader.load_search_request())
# TODO: Make it so that you don't have to return a tuple. See issue #55
return self.response_handler.make_list_response(
*self.resource.search(*args, **kwargs)
)
def post(self, *args, **kwargs):
"""Create an item in the collection."""
resource_requestctx.update(self.request_loader.load_item_request())
return self.response_handler.make_item_response(
*self.resource.create(*args, **kwargs) # data is passed in the context
)
class ItemView(BaseView):
"""Item view representation.
Allows reading, (partial) updating and deleting an item.
"""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(ItemView, self).__init__(*args, **kwargs)
# is defined by response_handler decorator
self.response_handler = None
# is defined by response_loader decorator
self.request_loader = None
def get(self, *args, **kwargs):
"""Get."""
return self.response_handler.make_item_response(
*self.resource.read(*args, **kwargs)
)
def put(self, *args, **kwargs):
"""Put."""
resource_requestctx.update(self.request_loader.load_item_request())
return self.response_handler.make_item_response(
*self.resource.update(*args, **kwargs) # data is passed in the context
)
def patch(self, *args, **kwargs):
"""Patch."""
resource_requestctx.update(self.request_loader.load_item_request())
return self.response_handler.make_item_response(
*self.resource.partial_update(*args, **kwargs)
)
def delete(self, *args, **kwargs):
"""Delete."""
return self.response_handler.make_item_response(
*self.resource.delete(*args, **kwargs)
)
|
py
|
1a59fb33f6d7f3512638cd4410e0b8480be8a4ae
|
import json
from typing import Any, Optional
import requests
from web import config
CATEGORY = 1 # Magic the Gathering
PAGE_LENGTH = 100 # API's max items per page limit is 100
class TCGPlayerException(Exception):
pass
class NoResults(TCGPlayerException):
pass
def _send(
    method: str,
    endpoint: str,
    params: Any = None,
    data: Any = None,
    token: Optional[str] = None
) -> Any:
    """Send a request to the TCGplayer API.
    :param method: HTTP method to use
    :type method: str
    :param endpoint: API endpoint to send request to
    :type endpoint: str
    :param params: URL parameters, defaults to None
    :type params: Any, optional
    :param data: Request data, defaults to None
    :type data: Any, optional
    :param token: Bearer token, defaults to None
    :type token: str, optional
    :raises NoResults: The API returned a 404 (no matching results)
    :raises TCGPlayerException: Any other HTTP error occurred
    :return: The response returned, decoded into an object
    :rtype: Any
    """
headers = {}
if token:
headers['Authorization'] = f'bearer {token}'
response = requests.request(
method,
f'https://api.tcgplayer.com{endpoint}',
headers=headers,
params=params,
data=data
)
try:
response.raise_for_status()
except requests.HTTPError as e:
code = e.response.status_code
if code == 404:
raise NoResults from e
raise TCGPlayerException from e
resp = json.loads(response.text)
return resp
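# Usage sketch (comments only, not executed): the wrapper is normally driven
# through the helpers below; an authenticated call to the groups listing that
# get_groups() uses further down would look like this:
#
#     token = login()
#     groups = _send(
#         'GET',
#         f'/catalog/categories/{CATEGORY}/groups',
#         params={'offset': 0, 'limit': PAGE_LENGTH},
#         token=token,
#     )['results']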
def _get(endpoint, **kwargs):
"""Wrapper function for sending GET requests."""
return _send('GET', endpoint, **kwargs)
def _post(endpoint, **kwargs):
"""Wrapper function for sending POST requests."""
return _send('POST', endpoint, **kwargs)
def login() -> str:
data = {
'grant_type': 'client_credentials',
'client_id': config.TCGPLAYER_PUBLICKEY,
'client_secret': config.TCGPLAYER_PRIVATEKEY
}
token = _post(
'/token',
data=data
)['access_token']
return token
def _offset(page):
"""Convert a page number into an offset."""
return (page - 1) * PAGE_LENGTH
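# Example: with PAGE_LENGTH == 100, _offset(1) == 0 and _offset(3) == 200.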
def _all_pages(callback, **kwargs):
"""Get all pages available for a given endpoint.
:param callback: Callback function getting the pages
:type callback: function
"""
page = 1
results = []
token = login()
while True:
try:
resp = callback(page, token, **kwargs)
except NoResults:
# 404, meaning no more results
break
if len(resp) == 0:
# Backup to prevent infinite loop, in case API stops 404-ing
break
results += resp
page += 1
return results
def get_groups(page, token):
params = {
'offset': _offset(page),
'limit': PAGE_LENGTH,
}
resp = _get(
f'/catalog/categories/{CATEGORY}/groups',
params=params,
token=token
)
groups = resp['results']
return groups
def get_all_groups():
return _all_pages(get_groups)
def get_products(page, token, groupid=None):
params = {
'productTypes': 'Cards',
'categoryId': CATEGORY, # Only MTG cards
'groupid': groupid,
'offset': _offset(page),
'limit': PAGE_LENGTH,
'getExtendedFields': True
}
resp = _get(
'/catalog/products',
params=params,
token=token
)
products = resp['results']
for p in products:
# Convert extended data to dictionary
extras = {}
for data in p['extendedData']:
extras[data['name']] = data['value']
p['extendedData'] = extras
return products
def get_all_products(groupid):
return _all_pages(get_products, groupid=groupid)
def get_product_prices(products):
productids = ','.join([str(p) for p in products])
token = login()
return _get(f'/pricing/product/{productids}', token=token)['results']
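# Example (hypothetical product ids): get_product_prices([12345, 67890]) issues
# a single request to /pricing/product/12345,67890 and returns its 'results' list.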
|
py
|
1a59fc6814cf8eac0e1f9448438d35e92c1e8952
|
"""
BaBi dataset dictionary
"""
babi_map = {
"1": "qa1_single-supporting-fact",
"2": "qa2_two-supporting-facts",
"3": "qa3_three-supporting-facts",
"4": "qa4_two-arg-relations",
"5": "qa5_three-arg-relations",
"6": "qa6_yes-no-questions",
"7": "qa7_counting",
"8": "qa8_lists-sets",
"9": "qa9_simple-negation",
"10": "qa10_indefinite-knowledge",
"11": "qa11_basic-coreference",
"12": "qa12_conjunction",
"13": "qa13_compound-coreference",
"14": "qa14_time-reasoning",
"15": "qa15_basic-deduction",
"16": "qa16_basic-induction",
"17": "qa17_positional-reasoning",
"18": "qa18_size-reasoning",
"19": "qa19_path-finding",
"20": "qa20_agents-motivations",
"MCTest": "MCTest",
"19changed": "19changed",
"joint": "all_shuffled",
"sh1": "../shuffled/qa1_single-supporting-fact",
"sh2": "../shuffled/qa2_two-supporting-facts",
"sh3": "../shuffled/qa3_three-supporting-facts",
"sh4": "../shuffled/qa4_two-arg-relations",
"sh5": "../shuffled/qa5_three-arg-relations",
"sh6": "../shuffled/qa6_yes-no-questions",
"sh7": "../shuffled/qa7_counting",
"sh8": "../shuffled/qa8_lists-sets",
"sh9": "../shuffled/qa9_simple-negation",
"sh10": "../shuffled/qa10_indefinite-knowledge",
"sh11": "../shuffled/qa11_basic-coreference",
"sh12": "../shuffled/qa12_conjunction",
"sh13": "../shuffled/qa13_compound-coreference",
"sh14": "../shuffled/qa14_time-reasoning",
"sh15": "../shuffled/qa15_basic-deduction",
"sh16": "../shuffled/qa16_basic-induction",
"sh17": "../shuffled/qa17_positional-reasoning",
"sh18": "../shuffled/qa18_size-reasoning",
"sh19": "../shuffled/qa19_path-finding",
"sh20": "../shuffled/qa20_agents-motivations",
}
|
py
|
1a59fcbea2f81d76fc40d6a3d4ffc20e7fc9066d
|
import sys
try:
from pathlib import Path
except ImportError:
# noinspection PyUnresolvedReferences
from pathlib2 import Path
__version__ = "1.0.0"
__vendor_site__ = (Path(__file__).parent / "_vendor").as_posix()
if __vendor_site__ not in sys.path:
sys.path.insert(0, __vendor_site__)
|
py
|
1a59fce227c1b8c722cece5c89927f0810913fae
|
import django
from .entities import Guest
def pytest_configure():
from django.conf import settings
settings.configure(
ROOT_URLCONF='tests.urls',
REST_FRAMEWORK={
'DEFAULT_RENDERER_CLASSES': ('winter.json_renderer.JSONRenderer',),
'UNAUTHENTICATED_USER': Guest,
},
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': True, # We want template errors to raise
},
},
],
INSTALLED_APPS=(
'tests',
),
)
django.setup()
|
py
|
1a59fd66d65ce5cdee868a871fcb4d24449e5d7f
|
from django.views.generic import TemplateView, ListView
from django.contrib.auth.models import User
class HomeView(TemplateView):
template_name = 'index.html'
class PostRelatedViewMixin(object):
def get_context_data(self, **kwargs):
ctx = super(PostRelatedViewMixin, self).get_context_data(**kwargs)
ctx['concrete_model'] = self.model
return ctx
class Authors(ListView):
template_name = 'authors.html'
model = User
queryset = User.objects.all()
|
py
|
1a59ff28e620da729ac7f87da4900ac09dcd0f02
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
from . import Command
from ..benchmarks import Benchmarks
from ..machine import iter_machine_files
from ..results import iter_results_for_machine, iter_results_for_machine_and_hash
from ..runner import format_benchmark_result
from ..repo import get_repo, NoSuchNameError
from ..util import load_json
from ..console import log, color_print
from ..environment import get_environments
from .. import util
from . import common_args
class Show(Command):
@classmethod
def setup_arguments(cls, subparsers):
parser = subparsers.add_parser(
"show", help="Print recorded data.",
description="""Print saved benchmark results.""")
parser.add_argument(
'commit', nargs='?', default=None,
help="""The commit to show data for.""")
parser.add_argument(
'--details', action='store_true', default=False,
help="""Show all result details.""")
common_args.add_bench(parser)
common_args.add_machine(parser)
common_args.add_environment(parser)
parser.set_defaults(func=cls.run_from_args)
return parser
@classmethod
def run_from_conf_args(cls, conf, args, **kwargs):
return cls.run(
conf=conf, commit=args.commit, bench=args.bench,
machine=args.machine, env_spec=args.env_spec,
details=args.details, **kwargs
)
@classmethod
def run(cls, conf, commit=None, bench=None, machine=None, env_spec=None,
details=False):
if env_spec:
env_names = ([env.name for env in get_environments(conf, env_spec, verbose=False)]
+ list(env_spec))
else:
env_names = None
machines = []
for path in iter_machine_files(conf.results_dir):
d = load_json(path)
machines.append(d['machine'])
if len(machines) == 0:
raise util.UserError("No results found")
elif machine is None:
pass
elif machine in machines:
machines = [machine]
else:
raise util.UserError(
"Results for machine '{0} not found".format(machine))
benchmarks = Benchmarks.load(conf, regex=bench)
if commit is None:
cls._print_commits(conf, machines, env_names, benchmarks)
else:
cls._print_results(conf, commit, machines, env_names, benchmarks,
show_details=details)
@classmethod
def _print_commits(cls, conf, machines, env_names, benchmarks):
commits = defaultdict(lambda: {})
for machine in machines:
for result in iter_results_for_machine(
conf.results_dir, machine):
if env_names is not None and result.env_name not in env_names:
continue
if result.get_result_keys(benchmarks):
commits[(machine, result.env_name)][result.commit_hash] = result.date
log.flush()
color_print("Commits with results:")
color_print("")
for machine, env_name in sorted(commits.keys()):
color_print("Machine : {}".format(machine))
color_print("Environment: {}".format(env_name))
color_print("")
cur_commits = commits[(machine, env_name)]
commit_order = list(cur_commits.keys())
commit_order.sort(key=lambda x: cur_commits[x])
for commit in commit_order:
color_print(" {}".format(commit[:conf.hash_length]))
color_print("")
@classmethod
def _print_results(cls, conf, commit_hash, machines, env_names, benchmarks,
show_details=False):
repo = get_repo(conf)
try:
commit_hash = repo.get_hash_from_name(commit_hash)
except NoSuchNameError:
pass
def results_iter():
for machine in sorted(machines):
for result in iter_results_for_machine_and_hash(
conf.results_dir, machine, commit_hash):
if env_names is not None and result.env_name not in env_names:
continue
yield machine, result
color_print("Commit: {}".format(repo.get_decorated_hash(commit_hash,
conf.hash_length)),
"blue")
color_print("")
for machine, result in results_iter():
for name in sorted(result.get_result_keys(benchmarks)):
cls._print_benchmark(machine, result, benchmarks[name],
show_details=show_details)
@classmethod
def _print_benchmark(cls, machine, result, benchmark, show_details=False):
color_print("{} [{}/{}]".format(benchmark['name'],
machine,
result.env_name),
'green')
info, details = format_benchmark_result(result, benchmark)
color_print(" {}".format(info), 'red')
if details:
color_print(" " + details.replace("\n", "\n "))
started_at = result.started_at.get(benchmark['name'])
ended_at = result.ended_at.get(benchmark['name'])
if started_at and ended_at:
started_at = util.js_timestamp_to_datetime(started_at)
ended_at = util.js_timestamp_to_datetime(ended_at)
color_print(' started: {}, duration: {}'.format(
started_at.strftime('%Y-%m-%d %H:%M:%S'),
util.human_time((ended_at - started_at).total_seconds())))
if not show_details:
color_print("")
return
stats = result.get_result_stats(benchmark['name'], benchmark['params'])
def get_stat_info(key):
return [x.get(key) if x is not None else None for x in stats]
for key in ['repeat', 'number', 'ci_99', 'mean', 'std', 'min', 'max']:
values = get_stat_info(key)
if key == 'ci_99':
values = ["({}, {})".format(util.human_value(x[0], benchmark['unit']),
util.human_value(x[1], benchmark['unit']))
if x is not None else None
for x in values]
elif any(isinstance(x, float) for x in values):
values = [util.human_value(x, benchmark['unit']) if x is not None else None
for x in values]
if not all(x is None for x in values):
color_print(" {}: {}".format(key, ", ".join(map(str, values))))
samples = result.get_result_samples(benchmark['name'], benchmark['params'])
if not all(x is None for x in samples):
color_print(" samples: {}".format(samples))
color_print("")
|
py
|
1a59ff3d49df131f9536260f111840a68525b875
|
import os
from django.conf import settings
from .constants import *
def snake_to_camel_case(text_snake):
return '{}'.format(
text_snake.title().replace('_', ''),
)
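# Example: snake_to_camel_case('about_us') -> 'AboutUs'; str.title() capitalizes
# each underscore-separated chunk before the underscores are stripped.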
def create_file(directory, file_name, file_content):
path = os.path.join(directory, file_name)
if not os.path.isfile(path):
with open(path, 'w') as f:
f.write(file_content)
def create_or_append_file(directory, file_name, file_content):
path = os.path.join(directory, file_name)
if not os.path.isfile(path):
create_file(directory, file_name, file_content)
else:
with open(path, 'r') as f:
lines = f.readlines()
if file_content in lines:
return
with open(path, 'a') as f:
f.write(file_content)
def generate_page(app, page, base):
base_directory = os.path.abspath(os.path.join(settings.BASE_DIR))
available_bases = ['page', 'list', 'search']
if base not in available_bases:
        raise Exception(f'Invalid "base" parameter. Allowed values: {", ".join(available_bases)}')
# create models
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_MODEL.format(app=app, page=page)))
os.makedirs(
directory,
exist_ok=True
)
# models page
base_name = os.path.basename(FILE_NAME_MODEL.format(app=app, page=page))
content = FILE_CONTENT_MODEL_DICT[base].format(app=app, page=page, page_capitalize=snake_to_camel_case(page))
create_file(directory, base_name, content)
# models init
base_name = os.path.basename(FILE_NAME_MODEL_INIT.format(app=app, page=page))
content = FILE_CONTENT_MODEL_INIT.format(page=page, page_capitalize=snake_to_camel_case(page))
create_or_append_file(directory, base_name, content)
print(f'Do not forget to update verbose_name and verbose_name_plural in: backend/{FILE_NAME_MODEL.format(app=app, page=page)}')
# create admin
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_ADMIN.format(app=app, page=page)))
os.makedirs(
directory,
exist_ok=True
)
# admin page
base_name = os.path.basename(FILE_NAME_ADMIN.format(app=app, page=page))
content = FILE_CONTENT_ADMIN.format(app=app, page=page, page_capitalize=snake_to_camel_case(page))
create_file(directory, base_name, content)
# admin init
base_name = os.path.basename(FILE_NAME_ADMIN_INIT.format(app=app, page=page))
content = FILE_CONTENT_ADMIN_INIT.format(page=page, page_capitalize=snake_to_camel_case(page))
create_or_append_file(directory, base_name, content)
# create translation
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_TRANSLATION.format(app=app, page=page)))
os.makedirs(
directory,
exist_ok=True
)
# translation page
base_name = os.path.basename(FILE_NAME_TRANSLATION.format(app=app, page=page))
content = FILE_CONTENT_TRANSLATION.format(app=app, page=page, page_capitalize=snake_to_camel_case(page))
create_file(directory, base_name, content)
# translation init
base_name = os.path.basename(FILE_NAME_TRANSLATION_INIT.format(app=app, page=page))
content = FILE_CONTENT_TRANSLATION_INIT.format(page=page, page_capitalize=snake_to_camel_case(page))
create_or_append_file(directory, base_name, content)
# create template
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_TEMPLATE.format(page=page)))
os.makedirs(
directory,
exist_ok=True
)
# template page
base_name = os.path.basename(FILE_NAME_TEMPLATE.format(page=page))
content = FILE_CONTENT_TEMPLATE_DICT[base]
create_file(directory, base_name, content)
# create app init
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_APP_INIT.format(app=app)))
os.makedirs(
directory,
exist_ok=True
)
# app init
base_name = os.path.basename(FILE_NAME_APP_INIT.format(app=app))
content = FILE_CONTENT_APP_INIT.format(app=app, app_capitalize=snake_to_camel_case(app))
create_file(directory, base_name, content)
# create app apps
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_APPS.format(app=app)))
os.makedirs(
directory,
exist_ok=True
)
# app apps
base_name = os.path.basename(FILE_NAME_APPS.format(app=app))
content = FILE_CONTENT_APPS.format(app=app, app_capitalize=snake_to_camel_case(app))
create_file(directory, base_name, content)
print(f'Do not forget to check that verbose_name is set correctly in: backend/{FILE_NAME_APPS.format(app=app)}')
# create migrations
directory = os.path.join(base_directory, os.path.dirname(FILE_NAME_MIGRATIONS_INIT.format(app=app)))
os.makedirs(
directory,
exist_ok=True
)
# migrations init
base_name = os.path.basename(FILE_NAME_MIGRATIONS_INIT.format(app=app))
create_file(directory, base_name, '')
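# A minimal usage sketch (hedged: the app/page names below are hypothetical and the
# FILE_NAME_* / FILE_CONTENT_* constants are assumed to come from .constants). Run it
# from a context where Django settings are configured, e.g. "python manage.py shell":
#
#   generate_page(app='blog', page='news_item', base='list')
#
# This scaffolds the model, admin, translation, template, app and migration files for
# the "news_item" page of the "blog" app, creating files that are missing and
# appending to the various __init__ modules as needed.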
|
py
|
1a5a012a47c9d7eba1dd90c516ba28b009619081
|
#!/usr/bin/env python
__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014, The Cogent Project"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "4.1"
__maintainer__ = "Mari Wahl"
__email__ = "[email protected]"
from helpers import running, constants
# change here for type of net:
NETWORK_FILES = constants.NETWORK_FILES_DIR_COMMUNICATION + constants.NETWORK_FILES_UN_COMMUNICATION
TYPE_NET_DIR = "communication/"
def main():
running.sampling(NETWORK_FILES, TYPE_NET_DIR)
print("All graphs for " + TYPE_NET_DIR + " were processed. The end! \n")
if __name__ == '__main__':
main()
|
py
|
1a5a0173a5f207cc3d6347559e026e1da277a35c
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.mturk.core.worlds import MTurkOnboardWorld, MTurkTaskWorld
from parlai.mturk.core.agents import (
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
)
import threading
import time
def is_disconnected(act):
return 'text' in act and act['text'] in [
MTURK_DISCONNECT_MESSAGE,
RETURN_MESSAGE,
TIMEOUT_MESSAGE,
]
class LightChatOnboardingWorld(MTurkOnboardWorld):
"""
Example onboarding world.
Sends a message from the world to the worker and then exits as complete after the
worker uses the interface.
"""
instruction_act = {
'id': 'System',
'text': 'Please attempt to take a turn given the setting and persona '
'on the left. This is where your information will appear in '
'the main task.',
'task_data': {
'base_name': 'bandit',
'persona': (
"I am an unforgiving bandit. Orcs have stolen my family away,"
" and they treat me like trash. I one day want to have my "
"revenge, be it through blood or gold. I'll try to get back at"
" them any chance I get."
),
'setting': (
"You are in a bar. There isn't anything really special about "
"it, and it's relatively quiet tonight. The air is somewhat "
"tense. A sign above the bar says 'No Fighting' and something "
"tells you that rule is taken pretty seriously. "
"There is a beer here. There is an orc here. "
),
'actions': [
'wave at orc',
'steal coin purse from orc',
'hit orc',
'give beer to orc',
'hug orc',
'get beer',
],
},
}
bad_choice_act = {
'id': 'System',
'text': "Are you sure that's an appropriate action to take given your "
"persona and the current setting? Try again.",
}
too_short_act = {
'id': 'System',
'text': "Please generally speak in full sentences unless your persona "
"implies that your character isn't able to.",
}
block_act = {
'id': 'System',
'text': "Sorry, you've exceeded the maximum amount of tries to get the "
"correct actions given your persona and the setting, and thus we "
"don't believe you can complete the task correctly. Please return "
"the HIT.",
}
complete_act = {
'id': 'System',
'text': "Passed - We'll be pairing you with a partner. Hold on tight.",
}
def block_loop(self):
print('Worker {} failed onboarding'.format(self.mturk_agent.worker_id))
self.mturk_agent.observe(self.block_act)
self.mturk_agent.mturk_manager.soft_block_worker(self.mturk_agent.worker_id)
act = self.mturk_agent.act()
while not is_disconnected(act):
self.mturk_agent.observe(self.block_act)
act = self.mturk_agent.act()
return True
def parley(self):
self.turns = 0
self.mturk_agent.update_agent_id('Bandit')
self.mturk_agent.observe(self.instruction_act)
act = self.mturk_agent.act() # first attempt, turns = 0
data = act.get('task_data', {'action': None})
while data['action'] != 'steal coin purse from orc' or len(act['text']) < 4:
if self.turns >= 2: # if 3rd attempt wasn't correct, block worker
self.block_loop()
self.episodeDone = True
return
if is_disconnected(act):
self.episodeDone = True
return
if data['action'] != 'steal coin purse from orc':
self.mturk_agent.observe(self.bad_choice_act)
else:
self.mturk_agent.observe(self.too_short_act)
self.turns += 1
act = self.mturk_agent.act()
data = act.get('task_data', {'action': None})
self.mturk_agent.observe(self.complete_act)
self.mturk_agent.onboarding_turns = self.turns
self.episodeDone = True
time.sleep(3)
class LightChatTaskWorld(MTurkTaskWorld):
"""
World to demonstrate workers with asymmetric roles.
This task amounts to three rounds and then an evaluation step. It is purposefully
created as a task to demo multiple views and has no other purpose.
"""
collector_agent_id = 'Moderator'
def __init__(self, opt, mturk_agents, graph, room, characters):
self.mturk_agents = mturk_agents
self.graph = graph
self.room = room
self.characters = characters
# Extract the character names
self.c_names = [characters[0][0].lower(), characters[1][0].lower()]
self.graph_copy = graph.copy()
self.mturk_agents[0].update_agent_id(self.c_names[0].capitalize())
self.mturk_agents[1].update_agent_id(self.c_names[1].capitalize())
self.episodeDone = False
self.turns = 0
self.acts = []
self.graph.freeze(True)
def get_context_actions_for(self, agent_name):
self.graph.parse_exec(agent_name, 'look')
self.graph.parse_exec(agent_name, 'inv')
context = self.graph.get_text(agent_name).rstrip('\n')
use_actions = [
'get',
'put',
'drink',
'eat',
'steal',
'hit',
'hug',
'wear',
'wield',
'drop',
'give',
'remove',
]
actions = self.graph.get_possible_actions(agent_name, use_actions=use_actions)
return context, actions
def parley(self):
if self.turns == 0:
# Settings for both
for i in [0, 1]:
agent_name = self.c_names[i]
self.graph.get_text(agent_name).rstrip('\n')
context, actions = self.get_context_actions_for(agent_name)
ad = {
'id': 'System',
'text': "Your chat partner is: {}. "
"Please chat for 8 full turns "
"while pretending to be your assigned "
"persona in the assigned setting, both "
"provided in the 'context' tab of the left panel. "
"After the first turn you will need to respond within "
"5 minutes to avoid timing out."
"If unsure what to talk about, start "
"getting to know your partner's persona, or "
"discuss the setting. Take actions when/if it "
"feels appropriate to. "
"Any other characters in the room will not interact "
"with or respond to you, so while they may be good "
"things to talk about, don't interact with them."
"You can find the original instructions on the "
"'Task Instructions' tab to the left."
"".format(self.c_names[1 - i]),
'task_data': {
'base_name': self.c_names[i],
'persona': self.characters[i][1]['personas'][0],
'setting': context,
'actions': actions,
},
}
self.mturk_agents[i].observe(ad)
if self.turns < 7:
for i in [0, 1]:
cur_agent = self.mturk_agents[i]
other_agent = self.mturk_agents[1 - i]
cur_agent_name = self.c_names[i]
other_agent_name = self.c_names[1 - i]
if self.turns == 0 and i == 0:
a = cur_agent.act()
else:
a = cur_agent.act(timeout=5 * 60)
self.acts.append(a)
if is_disconnected(a):
self.episodeDone = True
return
graph_action = a.get('task_data', {'action': ''})['action']
observe_action = {
'id': cur_agent_name.capitalize(),
'text': a['text'],
'task_data': {},
}
if graph_action.startswith('gesture'):
observe_action['task_data']['action'] = graph_action
elif graph_action != '':
# execute graph action
status, c_acts_text = self.graph.parse_exec(
cur_agent_name, graph_action
)
if status:
self.graph.update_world()
# send new setting and actions to the actor
return_act_text = self.graph.get_text(cur_agent_name).rstrip('\n')
if status:
observe_action['task_data']['action'] = self.graph.get_text(
other_agent_name
).rstrip('\n')
context, actions = self.get_context_actions_for(cur_agent_name)
reflex_action = {
'id': 'System',
'text': return_act_text,
'task_data': {'setting': context, 'actions': actions},
}
cur_agent.observe(reflex_action)
# Set the viewer context change and new actions
context, actions = self.get_context_actions_for(other_agent_name)
observe_action['task_data']['setting'] = context
observe_action['task_data']['actions'] = actions
other_agent.observe(observe_action)
self.turns += 1
else:
# evaluate
ad = {
'id': 'System',
'text': "Thank you for the talk, the chat is complete.",
}
for agent in self.mturk_agents:
agent.observe(ad)
self.episodeDone = True
def episode_done(self):
return self.episodeDone
def shutdown(self):
# Parallel shutdown of agents
def shutdown_agent(agent):
try:
agent.shutdown(timeout=None)
except Exception:
agent.shutdown() # not MTurkAgent
threads = []
for agent in self.mturk_agents:
t = threading.Thread(target=shutdown_agent, args=(agent,))
t.start()
threads.append(t)
for t in threads:
t.join()
def review_work(self):
# Can review the work here to accept or reject it
pass
def get_custom_task_data(self):
# brings important data together for the task, to later be used for
# creating the dataset. If data requires pickling, put it in a field
# called 'needs-pickle'.
return {
'acts': self.acts,
'room': self.room,
'characters': self.characters,
'needs-pickle': self.graph_copy,
}
|
py
|
1a5a01fd49fa864aa43eec870b947083b81c0905
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import json
import pdb
import pickle
import os
from queue import Queue
import sys
from threading import Thread
import time
import h5py
import numpy as np
import pandas as pd
import pysam
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import seqnn
from basenji import stream
from basenji import vcf as bvcf
'''
basenji_sad.py
Compute SNP Activity Difference (SAD) scores for SNPs in a VCF file.
'''
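# Example invocation (a sketch only; the params/model/VCF file names are hypothetical,
# and BASENJIDIR must be set so the default genome FASTA path resolves):
#
#   basenji_sad.py --rc --shifts "1,0,-1" --stats SAD,SADR -o sad_out \
#       params.json model_best.h5 snps.vcf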
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <vcf_file>'
parser = OptionParser(usage)
parser.add_option('-f', dest='genome_fasta',
default='%s/data/hg38.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-n', dest='norm_file',
default=None,
help='Normalize SAD scores')
parser.add_option('-o',dest='out_dir',
default='sad',
help='Output directory for tables and plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--pseudo', dest='log_pseudo',
default=1, type='float',
help='Log2 pseudocount [Default: %default]')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--stats', dest='sad_stats',
default='SAD',
help='Comma-separated list of stats to save. [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option('--ti', dest='track_indexes',
default=None, type='str',
help='Comma-separated list of target indexes to output BigWig tracks')
parser.add_option('--threads', dest='threads',
default=False, action='store_true',
help='Run CPU math and output in a separate thread [Default: %default]')
# parser.add_option('-u', dest='penultimate',
# default=False, action='store_true',
# help='Compute SED in the penultimate layer [Default: %default]')
(options, args) = parser.parse_args()
if len(args) == 3:
# single worker
params_file = args[0]
model_file = args[1]
vcf_file = args[2]
elif len(args) == 4:
# multi separate
options_pkl_file = args[0]
params_file = args[1]
model_file = args[2]
vcf_file = args[3]
# save out dir
out_dir = options.out_dir
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = out_dir
elif len(args) == 5:
# multi worker
options_pkl_file = args[0]
params_file = args[1]
model_file = args[2]
vcf_file = args[3]
worker_index = int(args[4])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
parser.error('Must provide parameters and model files and QTL VCF file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
if options.track_indexes is None:
options.track_indexes = []
else:
options.track_indexes = [int(ti) for ti in options.track_indexes.split(',')]
if not os.path.isdir('%s/tracks' % options.out_dir):
os.mkdir('%s/tracks' % options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
options.sad_stats = options.sad_stats.split(',')
#################################################################
# read parameters and targets
# read model parameters
with open(params_file) as params_open:
params = json.load(params_open)
params_model = params['model']
params_train = params['train']
if options.targets_file is None:
target_slice = None
else:
targets_df = pd.read_csv(options.targets_file, sep='\t', index_col=0)
target_ids = targets_df.identifier
target_labels = targets_df.description
target_slice = targets_df.index
#################################################################
# setup model
# can we sum on GPU?
length_stats = set(['SAX','SAXR','SAR','ALT','REF'])
sum_length = length_stats.isdisjoint(set(options.sad_stats))
sum_length = False # minimal influence
seqnn_model = seqnn.SeqNN(params_model)
seqnn_model.restore(model_file)
seqnn_model.build_slice(target_slice)
if sum_length:
seqnn_model.build_sad()
seqnn_model.build_ensemble(options.rc, options.shifts)
targets_length = seqnn_model.target_lengths[0]
num_targets = seqnn_model.num_targets()
if options.targets_file is None:
target_ids = ['t%d' % ti for ti in range(num_targets)]
target_labels = ['']*len(target_ids)
#################################################################
# load SNPs
# filter for worker SNPs
if options.processes is not None:
# determine boundaries
num_snps = bvcf.vcf_count(vcf_file)
worker_bounds = np.linspace(0, num_snps, options.processes+1, dtype='int')
# read SNPs from VCF
snps = bvcf.vcf_snps(vcf_file, start_i=worker_bounds[worker_index],
end_i=worker_bounds[worker_index+1])
else:
# read SNPs from VCF
snps = bvcf.vcf_snps(vcf_file)
num_snps = len(snps)
# open genome FASTA
genome_open = pysam.Fastafile(options.genome_fasta)
def snp_gen():
for snp in snps:
# get SNP sequences
snp_1hot_list = bvcf.snp_seq1(snp, params_model['seq_length'], genome_open)
for snp_1hot in snp_1hot_list:
yield snp_1hot
#################################################################
# setup output
sad_out = initialize_output_h5(options.out_dir, options.sad_stats,
snps, target_ids, target_labels, targets_length)
if options.threads:
snp_threads = []
snp_queue = Queue()
for i in range(1):
sw = SNPWorker(snp_queue, sad_out, options.sad_stats, options.log_pseudo)
sw.start()
snp_threads.append(sw)
#################################################################
# predict SNP scores, write output
# initialize predictions stream
preds_stream = stream.PredStreamGen(seqnn_model, snp_gen(), params_train['batch_size'])
# predictions index
pi = 0
for si in range(num_snps):
# get predictions
ref_preds = preds_stream[pi]
pi += 1
alt_preds = preds_stream[pi]
pi += 1
if options.threads:
# queue SNP
snp_queue.put((ref_preds, alt_preds, si))
else:
# process SNP
if sum_length:
print('Length summed')
write_snp(ref_preds, alt_preds, sad_out, si,
options.sad_stats, options.log_pseudo)
else:
write_snp_len(ref_preds, alt_preds, sad_out, si,
options.sad_stats, options.log_pseudo)
if options.threads:
# finish queue
print('Waiting for threads to finish.', flush=True)
snp_queue.join()
# close genome
genome_open.close()
###################################################
# compute SAD distributions across variants
write_pct(sad_out, options.sad_stats)
sad_out.close()
def initialize_output_h5(out_dir, sad_stats, snps, target_ids, target_labels, targets_length):
"""Initialize an output HDF5 file for SAD stats."""
num_targets = len(target_ids)
num_snps = len(snps)
sad_out = h5py.File('%s/sad.h5' % out_dir, 'w')
# write SNPs
snp_ids = np.array([snp.rsid for snp in snps], 'S')
sad_out.create_dataset('snp', data=snp_ids)
# write SNP chr
snp_chr = np.array([snp.chr for snp in snps], 'S')
sad_out.create_dataset('chr', data=snp_chr)
# write SNP pos
snp_pos = np.array([snp.pos for snp in snps], dtype='uint32')
sad_out.create_dataset('pos', data=snp_pos)
# check flips
snp_flips = [snp.flipped for snp in snps]
# write SNP reference allele
snp_refs = []
snp_alts = []
for snp in snps:
if snp.flipped:
snp_refs.append(snp.alt_alleles[0])
snp_alts.append(snp.ref_allele)
else:
snp_refs.append(snp.ref_allele)
snp_alts.append(snp.alt_alleles[0])
snp_refs = np.array(snp_refs, 'S')
snp_alts = np.array(snp_alts, 'S')
sad_out.create_dataset('ref_allele', data=snp_refs)
sad_out.create_dataset('alt_allele', data=snp_alts)
# write targets
sad_out.create_dataset('target_ids', data=np.array(target_ids, 'S'))
sad_out.create_dataset('target_labels', data=np.array(target_labels, 'S'))
# initialize SAD stats
for sad_stat in sad_stats:
if sad_stat in ['REF','ALT']:
sad_out.create_dataset(sad_stat,
shape=(num_snps, targets_length, num_targets),
dtype='float16')
else:
sad_out.create_dataset(sad_stat,
shape=(num_snps, num_targets),
dtype='float16')
return sad_out
def write_pct(sad_out, sad_stats):
"""Compute percentile values for each target and write to HDF5."""
# define percentiles
d_fine = 0.001
d_coarse = 0.01
percentiles_neg = np.arange(d_fine, 0.1, d_fine)
percentiles_base = np.arange(0.1, 0.9, d_coarse)
percentiles_pos = np.arange(0.9, 1, d_fine)
percentiles = np.concatenate([percentiles_neg, percentiles_base, percentiles_pos])
sad_out.create_dataset('percentiles', data=percentiles)
pct_len = len(percentiles)
for sad_stat in sad_stats:
if sad_stat not in ['REF','ALT']:
sad_stat_pct = '%s_pct' % sad_stat
# compute
sad_pct = np.percentile(sad_out[sad_stat], 100*percentiles, axis=0).T
sad_pct = sad_pct.astype('float16')
# save
sad_out.create_dataset(sad_stat_pct, data=sad_pct, dtype='float16')
def write_snp(ref_preds_sum, alt_preds_sum, sad_out, si, sad_stats, log_pseudo):
"""Write SNP predictions to HDF, assuming the length dimension has
been collapsed."""
# compare reference to alternative via mean subtraction
if 'SAD' in sad_stats:
sad = alt_preds_sum - ref_preds_sum
sad_out['SAD'][si,:] = sad.astype('float16')
# compare reference to alternative via mean log division
if 'SADR' in sad_stats:
sar = np.log2(alt_preds_sum + log_pseudo) \
- np.log2(ref_preds_sum + log_pseudo)
sad_out['SADR'][si,:] = sar.astype('float16')
def write_snp_len(ref_preds, alt_preds, sad_out, si, sad_stats, log_pseudo):
"""Write SNP predictions to HDF, assuming the length dimension has
been maintained."""
ref_preds = ref_preds.astype('float64')
alt_preds = alt_preds.astype('float64')
num_targets = ref_preds.shape[-1]
# sum across length
ref_preds_sum = ref_preds.sum(axis=0)
alt_preds_sum = alt_preds.sum(axis=0)
# compare reference to alternative via mean subtraction
if 'SAD' in sad_stats:
sad = alt_preds_sum - ref_preds_sum
sad_out['SAD'][si] = sad.astype('float16')
# compare reference to alternative via max subtraction
if 'SAX' in sad_stats:
sad_vec = (alt_preds - ref_preds)
max_i = np.argmax(np.abs(sad_vec), axis=0)
sax = sad_vec[max_i, np.arange(num_targets)]
sad_out['SAX'][si] = sax.astype('float16')
# compare reference to alternative via mean log division
if 'SADR' in sad_stats:
sar = np.log2(alt_preds_sum + log_pseudo) \
- np.log2(ref_preds_sum + log_pseudo)
sad_out['SADR'][si] = sar.astype('float16')
# compare reference to alternative via max subtraction
if 'SAXR' in sad_stats:
sar_vec = np.log2(alt_preds + log_pseudo) \
- np.log2(ref_preds + log_pseudo)
max_i = np.argmax(np.abs(sar_vec), axis=0)
saxr = sar_vec[max_i, np.arange(num_targets)]
sad_out['SAXR'][si] = saxr.astype('float16')
# compare geometric means
if 'SAR' in sad_stats:
sar_vec = np.log2(alt_preds + log_pseudo) \
- np.log2(ref_preds + log_pseudo)
geo_sad = sar_vec.sum(axis=0)
sad_out['SAR'][si] = geo_sad.astype('float16')
# predictions
if 'REF' in sad_stats:
sad_out['REF'][si] = ref_preds.astype('float16')
if 'ALT' in sad_stats:
sad_out['ALT'][si] = alt_preds.astype('float16')
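# Tiny worked example of the stats above (illustrative numbers only): with
# ref_preds = [[1, 2], [3, 4]] and alt_preds = [[2, 2], [3, 6]] (length x targets),
# the length sums are ref_preds_sum = [4, 6] and alt_preds_sum = [5, 8], so
# SAD = alt_preds_sum - ref_preds_sum = [1, 2]. SAX instead keeps, per target, the
# alt-ref difference at the single length position with the largest absolute change.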
class SNPWorker(Thread):
"""Compute summary statistics and write to HDF."""
def __init__(self, snp_queue, sad_out, stats, log_pseudo=1):
Thread.__init__(self)
self.queue = snp_queue
self.daemon = True
self.sad_out = sad_out
self.stats = stats
self.log_pseudo = log_pseudo
def run(self):
while True:
# unload predictions
ref_preds, alt_preds, szi = self.queue.get()
# write SNP
write_snp(ref_preds, alt_preds, self.sad_out, szi, self.stats, self.log_pseudo)
if szi % 32 == 0:
gc.collect()
# communicate finished task
self.queue.task_done()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
py
|
1a5a02e5acc594a8343d270951aef9aff046565c
|
import FWCore.ParameterSet.Config as cms
fftSimParam = cms.PSet(
NumOfFFT_Points = cms.int32(2048), # Length of signal, This should be an integer number with power of 2
SamplingRepetition = cms.int32(10) # FS: Sampling repetition per ns [1/ns]
)
TofCharge_Test = cms.PSet(
TofVector = cms.vdouble(0.0, 35.0),
ChargeVect = cms.vdouble(3*6242, 3*6242),
TestSensorSize= cms.double(0.04) # cm2
)
SiHitPulseShapeParam =cms.PSet(
HitPulseParam = cms.vdouble(0.6294422, 99.999855, 40.371655, 1.0, 3.5/2.2) # 0.6294422, 99.999855, 40.371655, 1.0, 3.5/2.2
)
SiPadFrontEndBlock0 = cms.PSet(
GoodForSizeRange = cms.vdouble(0.0,0.0255), # cm2, range from minimum size through maximum size
MaxFEOutputVoltage = cms.double(700.0), # mV
#LimmiterEdgeCorrFactor = cms.double(1.5), # unitless, by default should be 1.5
ZCComp_LowerTsh = cms.double(-5.0), # mV
ZCComp_UpperTsh = cms.double(0.0), # mV
ArmingComp_LowerTsh = cms.double(5.0), # mV
ArmingComp_UpperTsh = cms.double(20.0), # mV
TIA_Shaper_Gain = cms.double(28.0), # V/V (the amplifier gain after TIA and Shaper1)
Tia_Rf = cms.double(5.0), # kOhm
Tia_Cf = cms.double(0.25), # pF
Tia_Cin_gs = cms.double(0.4), # pf (just the TIA input Capacitance), the SiPad Capacitance will be added to this
Tia_Co = cms.double(0.4), # pf
Tia_gin = cms.double(3.0), # mS
SensorCouplingCapacitance = cms.double(315.0), # pF
SensorCapPerCm2 = cms.double(86.207), # pF/cm2
Shaper1_Tau = cms.double(0.9), # ns
CFD_Delay = cms.double(2.0), # ns
CfdShaper_Gain = cms.double(1.5), # V/V
CfdShaper_Tau = cms.double(0.25), # ns
DelayModel = cms.string('FirstOrderPadeDelay'), # 'IdealDelay' or 'FirstOrderPadeDelay'
CFD_Fraction = cms.double(0.5), # between 0-1, typically around 0.5
lpGBT_AlignerDelay = cms.double(5.2), # ns
Bx_Duration = cms.double(25.0), # ns
ToAUpperCut = cms.double(30.0), # ns // for BIB study, more than one BX should be investigated
ToALowerCut = cms.double(-30.0), # ns // for BIB study, more than one BX should be investigated
BinLength = cms.double(6.26), # ns
BinOffset = cms.double(0.0), # ns
)
SiPadFrontEndBlock1 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock2 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock3 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock4 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock5 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock6 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock7 = SiPadFrontEndBlock0.clone();
SiPadFrontEndBlock8 = SiPadFrontEndBlock0.clone();
#----------------------------
SiPadFrontEndBlock0.GoodForSizeRange = cms.vdouble(0.0 , 0.0255) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock1.GoodForSizeRange = cms.vdouble(0.0255, 0.0335) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock2.GoodForSizeRange = cms.vdouble(0.0335, 0.046) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock3.GoodForSizeRange = cms.vdouble(0.046 , 0.067) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock4.GoodForSizeRange = cms.vdouble(0.067 , 0.1065) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock5.GoodForSizeRange = cms.vdouble(0.1065, 0.1965) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock6.GoodForSizeRange = cms.vdouble(0.1965, 0.491) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock7.GoodForSizeRange = cms.vdouble(0.491 , 0.866) # cm2, range from minimum size through maximum size
SiPadFrontEndBlock8.GoodForSizeRange = cms.vdouble(0.866 , 100.0) # cm2, range from minimum size through maximum size
#-------------------------
SiPadFrontEndBlock1.CFD_Fraction = cms.double(0.55)
SiPadFrontEndBlock1.lpGBT_AlignerDelay = cms.double(5.5) # ns
SiPadFrontEndBlock1.TIA_Shaper_Gain = cms.double(28.0)
SiPadFrontEndBlock2.CFD_Fraction = cms.double(0.6)
SiPadFrontEndBlock2.lpGBT_AlignerDelay = cms.double(5.8) # ns
SiPadFrontEndBlock2.TIA_Shaper_Gain = cms.double(28.0)
SiPadFrontEndBlock3.CFD_Fraction = cms.double(0.65)
SiPadFrontEndBlock3.lpGBT_AlignerDelay = cms.double(6.2) # ns
SiPadFrontEndBlock3.TIA_Shaper_Gain = cms.double(31.0)
SiPadFrontEndBlock4.CFD_Fraction = cms.double(0.7)
SiPadFrontEndBlock4.lpGBT_AlignerDelay = cms.double(6.6) # ns
SiPadFrontEndBlock4.TIA_Shaper_Gain = cms.double(36.0)
SiPadFrontEndBlock5.CFD_Fraction = cms.double(0.75)
SiPadFrontEndBlock5.lpGBT_AlignerDelay = cms.double(7.1) # ns
SiPadFrontEndBlock5.TIA_Shaper_Gain = cms.double(46.0)
SiPadFrontEndBlock6.CFD_Fraction = cms.double(0.8)
SiPadFrontEndBlock6.lpGBT_AlignerDelay = cms.double(7.8) # ns
SiPadFrontEndBlock6.TIA_Shaper_Gain = cms.double(73.0)
SiPadFrontEndBlock7.CFD_Fraction = cms.double(0.85)
SiPadFrontEndBlock7.lpGBT_AlignerDelay = cms.double(8.6) # ns
SiPadFrontEndBlock7.TIA_Shaper_Gain = cms.double(167.0)
SiPadFrontEndBlock8.CFD_Fraction = cms.double(0.88)
SiPadFrontEndBlock8.lpGBT_AlignerDelay = cms.double(9.1) # ns
SiPadFrontEndBlock8.TIA_Shaper_Gain = cms.double(234.0)
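# The blocks above follow a clone-and-override pattern: every SiPadFrontEndBlockN starts
# as a clone() of SiPadFrontEndBlock0 and only its GoodForSizeRange, CFD_Fraction,
# lpGBT_AlignerDelay and TIA_Shaper_Gain are re-tuned for the corresponding pad-size range.
# A hypothetical additional block would be added the same way, e.g.:
#
#   SiPadFrontEndBlock9 = SiPadFrontEndBlock0.clone()
#   SiPadFrontEndBlock9.GoodForSizeRange = cms.vdouble(100.0, 200.0)  # cm2, made-up range
#   SiPadFrontEndBlock9.CFD_Fraction = cms.double(0.9)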
|
py
|
1a5a031965ce7a4ea52b3e2527c07d54c9775f78
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import CommandExecutionException
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from tekton.gae.middleware.json_middleware import JsonResponse, JsonUnsecureResponse
from aluno_app import facade
@login_not_required
@no_csrf
def index():
cmd = facade.list_alunos_cmd()
aluno_list = cmd()
short_form = facade.aluno_short_form()
aluno_short = [short_form.fill_with_model(m) for m in aluno_list]
return JsonResponse(aluno_short)
@login_not_required
@no_csrf
def save(_resp, **aluno_properties):
cmd = facade.save_aluno_cmd(**aluno_properties)
return _save_or_update_json_response(_resp, cmd)
@login_not_required
@no_csrf
def update(_resp, id, **aluno_properties):
cmd = facade.update_aluno_cmd(id, **aluno_properties)
return _save_or_update_json_response(_resp, cmd)
@login_not_required
@no_csrf
def delete(id):
facade.delete_aluno_cmd(id)()
def _save_or_update_json_response(_resp, cmd):
try:
aluno = cmd()
except CommandExecutionException:
_resp.status_code = 500
return JsonUnsecureResponse(cmd.errors)
short_form = facade.aluno_short_form()
return JsonResponse(short_form.fill_with_model(aluno))
|
py
|
1a5a0461e878a67b73aa0a489371e664e6a88872
|
import json
from flask import Flask, request
from juggler import Juggler
app = Flask(__name__)
def apiresult(fn):
def _wrapper(*args, **kw):
result = None
error = None
try:
result = fn(*args, **kw)
except Exception as exception:
error = (
type(exception).__name__,
str(exception))
return json.dumps({
'result': result,
'error': error})
_wrapper.__name__ = fn.__name__
_wrapper.__doc__ = fn.__doc__
return _wrapper
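# Every endpoint wrapped with @apiresult returns a JSON envelope of the form
# {"result": <return value>, "error": null} on success, or
# {"result": null, "error": ["ExceptionName", "message"]} when the handler raised.
# (Illustrative shape only, derived from the decorator above.)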
@app.route('/submit/<path:command>', methods=['GET'])
@app.route('/submit', methods=['POST'])
@apiresult
def submit(command=None):
"""
Submit a job. This can be a string, which will be split into terms by " ".
It can also be a pre-split list, which is more robust.
"""
if command is None:
command = request.json
job_id = Juggler.submit_job(command)
return job_id
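# Client-side usage sketch (hedged: assumes the dev server below is running on Flask's
# default 127.0.0.1:5000 and that the `requests` package is available):
#
#   import requests
#   requests.get('http://127.0.0.1:5000/submit/echo hello').json()
#   # -> {'result': <job_id>, 'error': None}
#   requests.post('http://127.0.0.1:5000/submit', json=['echo', 'hello']).json()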
@app.route('/submit_many', methods=['POST'])
@apiresult
def submit_many():
"""
Everything is fired off at once. Think of it as a shotgun.
Commands are sent in as lists of lists containing terms.
[['echo', 'before this'],
['echo', 'this may happen']]
"""
commands = request.json
job_ids = list(map(Juggler.submit_job, commands))
return job_ids
@app.route('/submit_chain', methods=['POST'])
@apiresult
def submit_chain():
"""
If submit_many is a shotgun, then this is a machine gun.
Everything is fired off in order, one by one.
Commands are sent in as lists of lists containing terms.
[['echo', 'this will happen'],
['echo', 'before this']]
"""
commands = request.json
job_ids = Juggler.submit_queue(commands)
return job_ids
@app.route('/result/<int:job_id>')
@apiresult
def get_result(job_id):
"""
Gets the result of a finished job.
If the job has not yet finished, wait for it.
"""
job_id, result = Juggler.get_result(job_id)
return result
@app.route('/status/<int:job_id>')
@app.route('/status')
@apiresult
def get_status(job_id=None):
"""
Gets the job status.
This will tell you what the command was,
and if it's still running.
"""
if job_id is None:
return dict(Juggler.get_all_statuses())
job_id, result = Juggler.get_status(job_id)
return result
if __name__ == '__main__':
app.run(debug=True)
|
py
|
1a5a04fe35b952b644cb70648de17af497302aac
|
"""Test 1.4."""
def test_true():
"""Test that it returns true."""
from CTCI_1_4 import palindrome_permutation
assert palindrome_permutation('Tact Coa') is True
def test_false():
"""Test that it returns false."""
from CTCI_1_4 import palindrome_permutation
assert palindrome_permutation(';asldfjk') is False
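# The CTCI_1_4 module itself is not included here; a minimal sketch of one possible
# implementation consistent with these tests (ignore case and non-letters, then allow
# at most one character with an odd count):
#
#   from collections import Counter
#
#   def palindrome_permutation(text):
#       counts = Counter(c for c in text.lower() if c.isalpha())
#       return sum(n % 2 for n in counts.values()) <= 1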
|
py
|
1a5a06448fd1bd59e719818848e7360d54adb9ef
|
# -*- coding: utf-8 -*-
import autograd.numpy as np
from lifelines.utils import coalesce, _get_index, CensoringType
from lifelines.fitters import ParametricRegressionFitter
import pandas as pd
from lifelines.utils.safe_exp import safe_exp
class PiecewiseExponentialRegressionFitter(ParametricRegressionFitter):
r"""
This implements a piecewise constant-hazard model at pre-specified break points.
.. math:: h(t) = \begin{cases}
1/\lambda_0(x) & \text{if $t \le \tau_0$} \\
1/\lambda_1(x) & \text{if $\tau_0 < t \le \tau_1$} \\
1/\lambda_2(x) & \text{if $\tau_1 < t \le \tau_2$} \\
...
\end{cases}
where :math:`\lambda_i(x) = \exp{\beta_i x}`.
Parameters
-----------
breakpoints: list
a list of times when a new exponential model is constructed.
penalizer: float
penalize the variance of the :math:`\lambda_i`. See blog post below.
alpha: float, optional (default=0.05)
the level in the confidence intervals.
Examples
----------
See blog post `here <https://dataorigami.net/blogs/napkin-folding/churn>`_ and
paper replication `here <https://github.com/CamDavidsonPilon/lifelines-replications/blob/master/replications/Friedman_1982.ipynb>`_
"""
# about 50% faster than BFGS
_scipy_fit_method = "SLSQP"
_scipy_fit_options = {"ftol": 1e-6, "maxiter": 200}
def __init__(self, breakpoints, alpha=0.05, penalizer=0.0):
super(PiecewiseExponentialRegressionFitter, self).__init__(alpha=alpha)
breakpoints = np.sort(breakpoints)
if len(breakpoints) and not (breakpoints[-1] < np.inf):
raise ValueError("Do not add inf to the breakpoints.")
if len(breakpoints) and breakpoints[0] < 0:
raise ValueError("First breakpoint must be greater than 0.")
self.breakpoints = np.append(breakpoints, [np.inf])
self.n_breakpoints = len(self.breakpoints)
self.penalizer = penalizer
self._fitted_parameter_names = ["lambda_%d_" % i for i in range(self.n_breakpoints)]
def _add_penalty(self, params, neg_ll):
params_stacked = np.stack(params.values())
coef_penalty = 0
if self.penalizer > 0:
for i in range(params_stacked.shape[1]):
if not self._constant_cols[i]:
coef_penalty = coef_penalty + (params_stacked[:, i]).var()
return neg_ll + self.penalizer * coef_penalty
def _cumulative_hazard(self, params, T, Xs):
n = T.shape[0]
T = T.reshape((n, 1))
M = np.minimum(np.tile(self.breakpoints, (n, 1)), T)
M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
lambdas_ = np.array([safe_exp(-np.dot(Xs[param], params[param])) for param in self._fitted_parameter_names])
return (M * lambdas_.T).sum(1)
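    # Illustration of the M construction above (made-up numbers): with breakpoints
    # [10, inf] and T = [4, 25], np.minimum(...) gives [[4, 4], [10, 25]] and the
    # row-wise diff turns that into the time spent in each interval, [[4, 0], [10, 15]];
    # multiplying by the per-interval hazards and summing over axis 1 then yields each
    # subject's cumulative hazard.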
def _log_hazard(self, params, T, X):
hz = self._hazard(params, T, X)
hz = np.clip(hz, 1e-20, np.inf)
return np.log(hz)
def _prep_inputs_for_prediction_and_return_parameters(self, X):
X = X.copy()
if isinstance(X, pd.DataFrame):
X = X[self.params_["lambda_0_"].index]
return np.array([np.exp(np.dot(X, self.params_["lambda_%d_" % i])) for i in range(self.n_breakpoints)])
def predict_cumulative_hazard(self, df, times=None, conditional_after=None):
"""
Return the cumulative hazard rate of subjects in X at time points.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
"""
if isinstance(df, pd.Series):
return self.predict_cumulative_hazard(df.to_frame().T)
if conditional_after is not None:
raise NotImplementedError()
times = np.atleast_1d(coalesce(times, self.timeline, np.unique(self.durations))).astype(float)
n = times.shape[0]
times = times.reshape((n, 1))
lambdas_ = self._prep_inputs_for_prediction_and_return_parameters(df)
bp = self.breakpoints
M = np.minimum(np.tile(bp, (n, 1)), times)
M = np.hstack([M[:, tuple([0])], np.diff(M, axis=1)])
return pd.DataFrame(np.dot(M, (1 / lambdas_)), columns=_get_index(df), index=times[:, 0])
@property
def _ll_null(self):
if hasattr(self, "_ll_null_"):
return self._ll_null_
initial_point = np.zeros(len(self._fitted_parameter_names))
model = self.__class__(breakpoints=self.breakpoints[:-1], penalizer=self.penalizer)
regressors = {param_name: ["_intercept"] for param_name in self._fitted_parameter_names}
if CensoringType.is_right_censoring(self):
df = pd.DataFrame({"T": self.durations, "E": self.event_observed, "entry": self.entry, "_intercept": 1.0})
model.fit_right_censoring(
df, "T", "E", initial_point=initial_point, entry_col="entry", regressors=regressors
)
elif CensoringType.is_interval_censoring(self):
df = pd.DataFrame(
{
"lb": self.lower_bound,
"ub": self.upper_bound,
"E": self.event_observed,
"entry": self.entry,
"_intercept": 1.0,
}
)
model.fit_interval_censoring(
df, "lb", "ub", "E", initial_point=initial_point, entry_col="entry", regressors=regressors
)
if CensoringType.is_left_censoring(self):
raise NotImplementedError()
self._ll_null_ = model.log_likelihood_
return self._ll_null_
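if __name__ == "__main__":
    # A minimal fitting sketch (hedged: the dataset, breakpoints, penalizer and column
    # names below are only an example, not part of this module's API).
    from lifelines.datasets import load_rossi

    rossi = load_rossi()
    per = PiecewiseExponentialRegressionFitter(breakpoints=[25, 40], penalizer=0.01)
    per.fit(rossi, "week", event_col="arrest")
    per.print_summary()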
|
py
|
1a5a06cc50d6c3c0ed85d6779dfc5a2a83de3bb4
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility for integration test workflow.
This script helps to update PR/Issue comments and labels during testing process.
For PR comment, this script will update (create if not exist) the "Test Result" in comment.
stage value: [start, progress, end]
USAGE:
python scripts/gha/it_workflow.py --stage <stage> \
--token ${{github.token}} \
--issue_number ${{needs.check_trigger.outputs.pr_number}}\
--actor ${{github.actor}} \
--commit ${{needs.prepare_matrix.outputs.github_ref}} \
--run_id ${{github.run_id}} \
[--new_token ${{steps.generate-token.outputs.token}}]
For the daily report, this script will update (or create, if it does not exist) the "Test Result" in the issue
with the title "Nightly Integration Testing Report" and the label "nightly-testing".
stage value: [report]
USAGE:
python scripts/gha/it_workflow.py --stage report \
--token ${{github.token}} \
--actor ${{github.actor}} \
--commit ${{needs.prepare_matrix.outputs.github_ref}} \
--run_id ${{github.run_id}}
"""
import datetime
import pytz
import shutil
from absl import app
from absl import flags
from absl import logging
import github
import summarize_test_results as summarize
_REPORT_LABEL = "nightly-testing"
_REPORT_TITLE = "Nightly Integration Testing Report"
_LABEL_TRIGGER_FULL = "tests-requested: full"
_LABEL_TRIGGER_QUICK = "tests-requested: quick"
_LABEL_PROGRESS = "tests: in-progress"
_LABEL_FAILED = "tests: failed"
_LABEL_SUCCEED = "tests: succeeded"
_COMMENT_TITLE_PROGESS = "### ⏳ Integration test in progress...\n"
_COMMENT_TITLE_PROGESS_FLAKY = "### Integration test with FLAKINESS (but still ⏳ in progress)\n"
_COMMENT_TITLE_PROGESS_FAIL = "### ❌ Integration test FAILED (but still ⏳ in progress)\n"
_COMMENT_TITLE_FLAKY = "### Integration test with FLAKINESS (succeeded after retry)\n"
_COMMENT_TITLE_FAIL = "### ❌ Integration test FAILED\n"
_COMMENT_TITLE_SUCCEED = "### ✅ Integration test succeeded!\n"
_COMMENT_IDENTIFIER = "integration-test-status-comment"
_COMMENT_SUFFIX = f'\n<hidden value="{_COMMENT_IDENTIFIER}"></hidden>'
_LOG_ARTIFACT_NAME = "log-artifact"
_LOG_OUTPUT_DIR = "test_results"
_BUILD_STAGES_START = "start"
_BUILD_STAGES_PROGRESS = "progress"
_BUILD_STAGES_END = "end"
_BUILD_STAGES_REPORT = "report"
_BUILD_STAGES = [_BUILD_STAGES_START, _BUILD_STAGES_PROGRESS, _BUILD_STAGES_END, _BUILD_STAGES_REPORT]
_BUILD_AGAINST_SDK = "sdk"
_BUILD_AGAINST_REPO = "repo"
FLAGS = flags.FLAGS
flags.DEFINE_string(
"stage", None,
"Different stage while running the workflow. Valid values in _BUILD_STAGES.")
flags.DEFINE_string(
"token", None,
"github.token: A token to authenticate on your repository.")
flags.DEFINE_string(
"issue_number", None,
"Github's issue # or pull request #.")
flags.DEFINE_string(
"actor", None,
"github.actor: The login of the user that initiated the workflow run.")
flags.DEFINE_string(
"commit", None, "GitHub commit hash")
flags.DEFINE_string(
"run_id", None,
"github.run_id: A unique number for each workflow run within a repository.")
flags.DEFINE_string(
"new_token", None,
"Only used with --stage end"
"Use a different token to remove the \"in-progress\" label,"
"to allow the removal to trigger the \"Check Labels\" workflow.")
flags.DEFINE_string(
"build_against", None,
"Integration testapps could either build against packaged SDK or repo")
def test_start(token, issue_number, actor, commit, run_id):
"""In PR, when start testing, add comment and label \"tests: in-progress\""""
github.add_label(token, issue_number, _LABEL_PROGRESS)
for label in [_LABEL_TRIGGER_FULL, _LABEL_TRIGGER_QUICK, _LABEL_FAILED, _LABEL_SUCCEED]:
github.delete_label(token, issue_number, label)
comment = (_COMMENT_TITLE_PROGESS +
_get_description(actor, commit, run_id) +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
def test_progress(token, issue_number, actor, commit, run_id):
"""In PR, when some test failed, update failure info and
add label \"tests: failed\""""
success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
if success_or_only_flakiness and not log_summary:
# succeeded (without flakiness)
return
else:
if success_or_only_flakiness:
# all failures/errors are due to flakiness (succeeded after retry)
title = _COMMENT_TITLE_PROGESS_FLAKY
else:
# failures/errors still exist after retry
title = _COMMENT_TITLE_PROGESS_FAIL
github.add_label(token, issue_number, _LABEL_FAILED)
comment = (title +
_get_description(actor, commit, run_id) +
log_summary +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
def test_end(token, issue_number, actor, commit, run_id, new_token):
"""In PR, when some test end, update Test Result Report and
update label: add \"tests: failed\" if test failed, add label
\"tests: succeeded\" if test succeed"""
success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
if success_or_only_flakiness and not log_summary:
# succeeded (without flakiness)
github.add_label(token, issue_number, _LABEL_SUCCEED)
comment = (_COMMENT_TITLE_SUCCEED +
_get_description(actor, commit, run_id) +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
else:
if success_or_only_flakiness:
# all failures/errors are due to flakiness (succeeded after retry)
title = _COMMENT_TITLE_FLAKY
github.add_label(token, issue_number, _LABEL_SUCCEED)
else:
# failures/errors still exist after retry
title = _COMMENT_TITLE_FAIL
github.add_label(token, issue_number, _LABEL_FAILED)
comment = (title +
_get_description(actor, commit, run_id) +
log_summary +
_COMMENT_SUFFIX)
_update_comment(token, issue_number, comment)
github.delete_label(new_token, issue_number, _LABEL_PROGRESS)
def test_report(token, actor, commit, run_id, build_against):
"""Update (create if not exist) a Daily Report in Issue.
The Issue with title _REPORT_TITLE and label _REPORT_LABEL:
https://github.com/firebase/firebase-unity-sdk/issues?q=is%3Aissue+label%3Anightly-testing
"""
issue_number = _get_issue_number(token, _REPORT_TITLE, _REPORT_LABEL)
previous_comment = github.get_issue_body(token, issue_number)
[previous_comment_repo, previous_comment_sdk] = previous_comment.split(_COMMENT_SUFFIX)
success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)
if success_or_only_flakiness and not log_summary:
# succeeded (without flakiness)
title = _COMMENT_TITLE_SUCCEED
comment = title + _get_description(actor, commit, run_id)
else:
title = _COMMENT_TITLE_FLAKY
comment = title + _get_description(actor, commit, run_id) + log_summary
if build_against==_BUILD_AGAINST_REPO:
comment = comment + _COMMENT_SUFFIX + previous_comment_sdk
else:
comment = previous_comment_repo + _COMMENT_SUFFIX + comment
if title == _COMMENT_TITLE_SUCCEED:
github.close_issue(token, issue_number)
else:
github.open_issue(token, issue_number)
github.update_issue_comment(token, issue_number, comment)
def _get_issue_number(token, title, label):
issues = github.search_issues_by_label(label)
for issue in issues:
if issue["title"] == title:
return issue["number"]
return github.create_issue(token, title, label, _COMMENT_SUFFIX)["number"]
def _update_comment(token, issue_number, comment):
comment_id = _get_comment_id(token, issue_number, _COMMENT_SUFFIX)
if not comment_id:
github.add_comment(token, issue_number, comment)
else:
github.update_comment(token, comment_id, comment)
def _get_comment_id(token, issue_number, comment_identifier):
comments = github.list_comments(token, issue_number)
for comment in comments:
if comment_identifier in comment['body']:
return comment['id']
return None
def _get_description(actor, commit, run_id):
"""Test Result Report Title and description"""
return ("Requested by @%s on commit %s\n" % (actor, commit) +
"Last updated: %s \n" % _get_datetime() +
"**[View integration test log & download artifacts](https://github.com/firebase/firebase-unity-sdk/actions/runs/%s)**\n" % run_id)
def _get_datetime():
"""Date time when Test Result Report updated"""
pst_now = datetime.datetime.now(pytz.timezone("America/Los_Angeles"))
return pst_now.strftime("%a %b %e %H:%M %Z %G")
def _get_summary_table(token, run_id):
"""Test Result Report Body, which is failed test table with markdown format"""
return summarize.summarize_logs(dir=_LOG_OUTPUT_DIR, markdown=True)
def _get_artifact_id(token, run_id, name):
artifacts = github.list_artifacts(token, run_id)
for artifact in artifacts:
if artifact["name"] == name:
return artifact["id"]
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if FLAGS.stage == _BUILD_STAGES_START:
test_start(FLAGS.token, FLAGS.issue_number, FLAGS.actor, FLAGS.commit, FLAGS.run_id)
elif FLAGS.stage == _BUILD_STAGES_PROGRESS:
test_progress(FLAGS.token, FLAGS.issue_number, FLAGS.actor, FLAGS.commit, FLAGS.run_id)
elif FLAGS.stage == _BUILD_STAGES_END:
test_end(FLAGS.token, FLAGS.issue_number, FLAGS.actor, FLAGS.commit, FLAGS.run_id, FLAGS.new_token)
elif FLAGS.stage == _BUILD_STAGES_REPORT:
test_report(FLAGS.token, FLAGS.actor, FLAGS.commit, FLAGS.run_id, FLAGS.build_against)
else:
print("Invalid stage value. Valid value: " + ",".join(_BUILD_STAGES))
if __name__ == "__main__":
flags.mark_flag_as_required("stage")
flags.mark_flag_as_required("token")
flags.mark_flag_as_required("actor")
flags.mark_flag_as_required("commit")
flags.mark_flag_as_required("run_id")
app.run(main)
|
py
|
1a5a087ca0e9c3269db9eba40f0abcb2d494b171
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_env_var_source import V1EnvVarSource
class TestV1EnvVarSource(unittest.TestCase):
""" V1EnvVarSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EnvVarSource(self):
"""
Test V1EnvVarSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_env_var_source.V1EnvVarSource()
pass
if __name__ == '__main__':
unittest.main()
|
py
|
1a5a0b2ee4f7d58f46b296425fc5073eb5ad78e6
|
from flask_wtf.file import FileField
from wtforms import Field
from wtforms.fields import Label
from wtforms.validators import InputRequired, Optional, ValidationError
from wtforms.widgets import TextInput
from re import search
from werkzeug.utils import secure_filename
def validate_filename(form, field):
'''
Validates that the uploaded file's name matches the expected filename defined in __init__.
Does not raise an error if the field is empty; include Optional() or InputRequired() in the form if needed.
'''
if field.data:
filename = secure_filename(field.data.filename)
#raise ValidationError('{}\t{}'.format(filename, field.expect))
if filename != field.expect:
message = 'Filename must match {} exactly'.format(field.expect)
raise ValidationError(message)
class NamedFileField(FileField):
'''
A file field that checks the name of its uploaded file against an expected title.
Inspect the class functions for more details.
'''
def __init__(self, label='', validators=None, expect='', required=False, **kwargs):
'''
Initializes the NamedFileField by calling super() on the FileField
Args:
label (str): if a value is provided, it will be formatted by "label.format(expect)".
validators (list of validators): optionally add extra validators.
expect (str): the title of the file to expect. ".tab" extension is not required if "label" is not provided.
required (bool): whether this field is required or not.
If label is not provided then additional text indicating the requirement will be added here in the label.
Note:
Flask WTForm docs suggest putting super().__init__ at the beginning of the function.
But since there are some built-in modifications to label text and validators, super is put at the end instead.
'''
if label:
labeltxt = label.format(expect)
self.expect = expect
else:
labeltxt = 'Upload ' + expect + '.tab'
self.expect = expect + '.tab'
if required:
labeltxt += ' (required)'
else:
labeltxt += ' (optional)'
if not validators:
validators = []
if required:
validators.insert(0, InputRequired())
else:
validators.insert(0, Optional())
validators.append(validate_filename)
super(FileField, self).__init__(labeltxt, validators, **kwargs)
class FloatListField(Field):
'''
A custom field to represent a list of floating point numbers.
Attributes:
widget: a text input box that is used to enter the list of numbers.
'''
widget = TextInput()
def _value(self):
'''
Reads default value from a literal float list and returns a comma-separated string
'''
if self.data:
return ', '.join(self.data)
else:
return ''
def process_formdata(self, valuelist):
'''
Processes the entered data and saves it to self.
Called at form submission but before validation.
'''
if valuelist:
self.data = [x.strip() for x in valuelist[0].split(',')]
else:
self.data = []
def validate_float_list(form, field):
'''
An inline validator to check that the entered data is a float list.
Raises:
ValidationError: if the entered data is not a float list.
'''
for x in field.data:
try:
float(x)
except ValueError:
raise ValidationError('Must be a comma-separated list of decimal numbers')
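# A minimal form sketch using both fields (hedged: FlaskForm and the field names are
# illustrative only; this module just defines the fields and validators):
#
#   from flask_wtf import FlaskForm
#
#   class ExampleUploadForm(FlaskForm):
#       counts = NamedFileField(expect='counts', required=True)   # expects "counts.tab"
#       weights = FloatListField('Weights', validators=[validate_float_list])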
|
py
|
1a5a0bcb54c78af9a4af34dd8118508a010fe45b
|
import tty
import termios
import fcntl
import os
from typing import IO, Type, List, Union, Optional
from types import TracebackType
_Attr = List[Union[int, List[bytes]]]
class Nonblocking(object):
"""
A context manager for making an input stream nonblocking.
"""
def __init__(self, stream):
# type: (IO) -> None
self.stream = stream
self.fd = self.stream.fileno()
def __enter__(self):
# type: () -> None
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, type=None, value=None, traceback=None):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
class Cbreak(object):
def __init__(self, stream):
# type: (IO) -> None
self.stream = stream
def __enter__(self):
# type: () -> Termmode
self.original_stty = termios.tcgetattr(self.stream)
tty.setcbreak(self.stream, termios.TCSANOW)
return Termmode(self.stream, self.original_stty)
def __exit__(self, type=None, value=None, traceback=None):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
class Termmode(object):
def __init__(self, stream, attrs):
# type: (IO, _Attr) -> None
self.stream = stream
self.attrs = attrs
def __enter__(self):
# type: () -> None
self.original_stty = termios.tcgetattr(self.stream)
termios.tcsetattr(self.stream, termios.TCSANOW, self.attrs)
def __exit__(self, type=None, value=None, traceback=None):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> None
termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)
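if __name__ == "__main__":
    # Usage sketch (hedged: assumes stdin is an interactive TTY; os.read raises
    # BlockingIOError when no byte is pending in nonblocking mode).
    import sys

    with Cbreak(sys.stdin), Nonblocking(sys.stdin):
        try:
            pending = os.read(sys.stdin.fileno(), 1)
        except BlockingIOError:
            pending = b""
    print("read:", repr(pending))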
|
py
|
1a5a0cccd0d7881118dbd736164bc4004c717956
|
# encoding: utf-8
import logging
from ckanext.metadata.logic.auth import check_privs, _authorize_package_action, _authorize_group_action, _authorize_member_action
log = logging.getLogger(__name__)
def package_create(context, data_dict):
"""
Override CKAN's package_create to prevent extension-specific package types from being
created directly via this action.
"""
return _authorize_package_action('create', context, data_dict)
def group_create(context, data_dict):
"""
Override CKAN's group_create to prevent extension-specific group types from being
created directly via this action.
"""
return _authorize_group_action('create', context, data_dict)
def member_create(context, data_dict):
"""
Override CKAN's member_create to prevent it being used to assign a metadata record to
an extension group type.
"""
return _authorize_member_action('create', context, data_dict)
# Admin functions
def metadata_standard_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def metadata_schema_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def infrastructure_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def organization_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def workflow_state_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def workflow_transition_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def workflow_annotation_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def metadata_standard_index_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def metadata_json_attr_map_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
def infrastructure_member_create(context, data_dict):
return {'success': check_privs(context, require_admin=True)}
# Curation functions
def metadata_collection_create(context, data_dict):
return {'success': check_privs(context, require_curator=True, require_organization=(data_dict or {}).get('organization_id'))}
# Contributor functions
def metadata_record_create(context, data_dict):
return {'success': check_privs(context, require_contributor=True, require_organization=(data_dict or {}).get('owner_org'))}
def metadata_record_workflow_annotation_create(context, data_dict):
organization_id = context['model'].Package.get(data_dict['id']).owner_org if 'id' in (data_dict or {}) else None
return {'success': check_privs(context, require_contributor=True, require_organization=organization_id)}
|
py
|
1a5a0cececc5b3f3cb9e4632da4d538f489bd38c
|
from __future__ import print_function, division
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import os
import pickle as pickle
import numpy as np
from DSVC import optim
class Solver(object):
"""
A Solver encapsulates all the logic necessary for training classification
models. The Solver performs stochastic gradient descent using different
update rules defined in optim.py.
The solver accepts both training and validation data and labels so it can
periodically check classification accuracy on both training and validation
data to watch out for overfitting.
To train a model, you will first construct a Solver instance, passing the
model, dataset, and various options (learning rate, batch size, etc.) to the
constructor. You will then call the train() method to run the optimization
procedure and train the model.
After the train() method returns, model.params will contain the parameters
that performed best on the validation set over the course of training.
In addition, the instance variable solver.loss_history will contain a list
of all losses encountered during training and the instance variables
solver.train_acc_history and solver.val_acc_history will be lists of the
accuracies of the model on the training and validation set at each epoch.
Example usage might look something like this:
data = {
'X_train': # training data
'y_train': # training labels
'X_val': # validation data
'y_val': # validation labels
}
model = MyAwesomeModel(hidden_size=100, reg=10)
solver = Solver(model, data,
update_rule='sgd',
optim_config={
'learning_rate': 1e-3,
},
lr_decay=0.95,
num_epochs=10, batch_size=100,
print_every=100)
solver.train()
A Solver works on a model object that must conform to the following API:
- model.params must be a dictionary mapping string parameter names to numpy
arrays containing parameter values.
- model.loss(X, y) must be a function that computes training-time loss and
gradients, and test-time classification scores, with the following inputs
and outputs:
Inputs:
- X: Array giving a minibatch of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,) giving labels for X where y[i] is the
label for X[i].
Returns:
If y is None, run a test-time forward pass and return:
- scores: Array of shape (N, C) giving classification scores for X where
scores[i, c] gives the score of class c for X[i].
If y is not None, run a training time forward and backward pass and
return a tuple of:
- loss: Scalar giving the loss
- grads: Dictionary with the same keys as self.params mapping parameter
names to gradients of the loss with respect to those parameters.
"""
def __init__(self, model, data, **kwargs):
"""
Construct a new Solver instance.
Required arguments:
- model: A model object conforming to the API described above
- data: A dictionary of training and validation data containing:
'X_train': Array, shape (N_train, d_1, ..., d_k) of training images
'X_val': Array, shape (N_val, d_1, ..., d_k) of validation images
'y_train': Array, shape (N_train,) of labels for training images
'y_val': Array, shape (N_val,) of labels for validation images
Optional arguments:
- update_rule: A string giving the name of an update rule in optim.py.
Default is 'sgd'.
- optim_config: A dictionary containing hyperparameters that will be
passed to the chosen update rule. Each update rule requires different
hyperparameters (see optim.py) but all update rules require a
'learning_rate' parameter so that should always be present.
- lr_decay: A scalar for learning rate decay; after each epoch the
learning rate is multiplied by this value.
- batch_size: Size of minibatches used to compute loss and gradient
during training.
- num_epochs: The number of epochs to run for during training.
- print_every: Integer; training losses will be printed every
print_every iterations.
- verbose: Boolean; if set to false then no output will be printed
during training.
- num_train_samples: Number of training samples used to check training
accuracy; default is 1000; set to None to use entire training set.
- num_val_samples: Number of validation samples to use to check val
accuracy; default is None, which uses the entire validation set.
- checkpoint_name: If not None, then save model checkpoints here every
epoch.
"""
self.model = model
self.X_train = data['X_train']
self.y_train = data['y_train']
self.X_val = data['X_val']
self.y_val = data['y_val']
# Unpack keyword arguments
self.update_rule = kwargs.pop('update_rule', 'sgd')
self.optim_config = kwargs.pop('optim_config', {})
self.lr_decay = kwargs.pop('lr_decay', 1.0)
self.batch_size = kwargs.pop('batch_size', 100)
self.num_epochs = kwargs.pop('num_epochs', 10)
self.num_train_samples = kwargs.pop('num_train_samples', 1000)
self.num_val_samples = kwargs.pop('num_val_samples', None)
self.checkpoint_name = kwargs.pop('checkpoint_name', None)
self.print_every = kwargs.pop('print_every', 10)
self.verbose = kwargs.pop('verbose', True)
# Throw an error if there are extra keyword arguments
if len(kwargs) > 0:
extra = ', '.join('"%s"' % k for k in list(kwargs.keys()))
raise ValueError('Unrecognized arguments %s' % extra)
# Make sure the update rule exists, then replace the string
# name with the actual function
if not hasattr(optim, self.update_rule):
raise ValueError('Invalid update_rule "%s"' % self.update_rule)
self.update_rule = getattr(optim, self.update_rule)
self._reset()
def _reset(self):
"""
Set up some book-keeping variables for optimization. Don't call this
manually.
"""
# Set up some variables for book-keeping
self.epoch = 0
self.best_val_acc = 0
self.best_params = {}
self.loss_history = []
self.train_acc_history = []
self.val_acc_history = []
# Make a deep copy of the optim_config for each parameter
self.optim_configs = {}
for p in self.model.params:
d = {k: v for k, v in self.optim_config.items()}
self.optim_configs[p] = d
def _step(self):
"""
Make a single gradient update. This is called by train() and should not
be called manually.
"""
# Make a minibatch of training data
num_train = self.X_train.shape[0]
batch_mask = np.random.choice(num_train, self.batch_size)
X_batch = self.X_train[batch_mask]
y_batch = self.y_train[batch_mask]
# Compute loss and gradient
loss, grads = self.model.loss(X_batch, y_batch)
self.loss_history.append(loss)
# Perform a parameter update
for p, w in self.model.params.items():
dw = grads[p]
config = self.optim_configs[p]
next_w, next_config = self.update_rule(w, dw, config)
self.model.params[p] = next_w
self.optim_configs[p] = next_config
def _save_checkpoint(self):
if self.checkpoint_name is None: return
checkpoint = {
'model': self.model,
'update_rule': self.update_rule,
'lr_decay': self.lr_decay,
'optim_config': self.optim_config,
'batch_size': self.batch_size,
'num_train_samples': self.num_train_samples,
'num_val_samples': self.num_val_samples,
'epoch': self.epoch,
'loss_history': self.loss_history,
'train_acc_history': self.train_acc_history,
'val_acc_history': self.val_acc_history,
}
filename = '%s_epoch_%d.pkl' % (self.checkpoint_name, self.epoch)
if self.verbose:
print('Saving checkpoint to "%s"' % filename)
with open(filename, 'wb') as f:
pickle.dump(checkpoint, f)
def check_accuracy(self, X, y, num_samples=None, batch_size=100):
"""
Check accuracy of the model on the provided data.
Inputs:
- X: Array of data, of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,)
- num_samples: If not None, subsample the data and only test the model
on num_samples datapoints.
- batch_size: Split X and y into batches of this size to avoid using
too much memory.
Returns:
- acc: Scalar giving the fraction of instances that were correctly
classified by the model.
"""
# Maybe subsample the data
N = X.shape[0]
if num_samples is not None and N > num_samples:
mask = np.random.choice(N, num_samples)
N = num_samples
X = X[mask]
y = y[mask]
# Compute predictions in batches
num_batches = N // batch_size
if N % batch_size != 0:
num_batches += 1
y_pred = []
for i in range(num_batches):
start = i * batch_size
end = (i + 1) * batch_size
scores = self.model.loss(X[start:end])
y_pred.append(np.argmax(scores, axis=1))
y_pred = np.hstack(y_pred)
acc = np.mean(y_pred == y)
return acc
def train(self):
"""
Run optimization to train the model.
"""
num_train = self.X_train.shape[0]
iterations_per_epoch = max(num_train // self.batch_size, 1)
num_iterations = self.num_epochs * iterations_per_epoch
for t in range(num_iterations):
self._step()
# Maybe print training loss
if self.verbose and t % self.print_every == 0:
print('(Iteration %d / %d) loss: %f' % (
t + 1, num_iterations, self.loss_history[-1]))
# At the end of every epoch, increment the epoch counter and decay
# the learning rate.
epoch_end = (t + 1) % iterations_per_epoch == 0
if epoch_end:
self.epoch += 1
for k in self.optim_configs:
self.optim_configs[k]['learning_rate'] *= self.lr_decay
# Check train and val accuracy on the first iteration, the last
# iteration, and at the end of each epoch.
first_it = (t == 0)
last_it = (t == num_iterations - 1)
if first_it or last_it or epoch_end:
train_acc = self.check_accuracy(self.X_train, self.y_train,
num_samples=self.num_train_samples)
val_acc = self.check_accuracy(self.X_val, self.y_val,
num_samples=self.num_val_samples)
self.train_acc_history.append(train_acc)
self.val_acc_history.append(val_acc)
self._save_checkpoint()
if self.verbose:
print('(Epoch %d / %d) train acc: %f; val_acc: %f' % (
self.epoch, self.num_epochs, train_acc, val_acc))
# Keep track of the best model
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
self.best_params = {}
for k, v in self.model.params.items():
self.best_params[k] = v.copy()
# At the end of training swap the best params into the model
self.model.params = self.best_params
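# --- Hedged illustration (not part of the original assignment code) ---
# A minimal sketch of a model object that conforms to the API documented in the
# Solver docstring above: `params` maps names to numpy arrays, and `loss(X, y)`
# returns test-time scores when y is None, or a (loss, grads) tuple otherwise.
# The class name and hyperparameters below are hypothetical.
class ToySoftmaxModel(object):
    def __init__(self, input_dim=10, num_classes=3, reg=0.0, seed=0):
        rng = np.random.RandomState(seed)
        self.reg = reg
        self.params = {
            'W': 0.01 * rng.randn(input_dim, num_classes),
            'b': np.zeros(num_classes),
        }

    def loss(self, X, y=None):
        X = X.reshape(X.shape[0], -1)
        scores = X.dot(self.params['W']) + self.params['b']
        if y is None:
            return scores
        # Softmax loss with L2 regularization, plus analytic gradients.
        shifted = scores - scores.max(axis=1, keepdims=True)
        probs = np.exp(shifted)
        probs /= probs.sum(axis=1, keepdims=True)
        N = X.shape[0]
        loss = -np.mean(np.log(probs[np.arange(N), y]))
        loss += 0.5 * self.reg * np.sum(self.params['W'] ** 2)
        dscores = probs
        dscores[np.arange(N), y] -= 1
        dscores /= N
        grads = {
            'W': X.T.dot(dscores) + self.reg * self.params['W'],
            'b': dscores.sum(axis=0),
        }
        return loss, grads

# Example wiring (would actually run gradient descent if executed):
#   data = {'X_train': np.random.randn(50, 10), 'y_train': np.random.randint(3, size=50),
#           'X_val': np.random.randn(20, 10), 'y_val': np.random.randint(3, size=20)}
#   solver = Solver(ToySoftmaxModel(), data, update_rule='sgd',
#                   optim_config={'learning_rate': 1e-2}, num_epochs=2, verbose=False)
#   solver.train()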
|
py
|
1a5a0d1c14e933b83d045371acbb5777d127726e
|
# ckwg +29
# Copyright 2020 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from kwiver.vital.algo import ComputeRefHomography
from kwiver.vital.tests.py_helpers import CommonConfigurationMixin
class SimpleComputeRefHomography(CommonConfigurationMixin, ComputeRefHomography):
"""
Implementation of ComputeRefHomography to test it
Examples:
"""
def __init__(self):
ComputeRefHomography.__init__(self)
def __vital_algorithm_register__():
from kwiver.vital.algo import algorithm_factory
# Register Algorithm
implementation_name = "SimpleComputeRefHomography"
if algorithm_factory.has_algorithm_impl_name(
SimpleComputeRefHomography.static_type_name(),
implementation_name):
return
algorithm_factory.add_algorithm( implementation_name,
"Test kwiver.vital.algo.ComputeRefHomography",
SimpleComputeRefHomography )
algorithm_factory.mark_algorithm_as_loaded( implementation_name )
|
py
|
1a5a0d1c7d7167b3a54529aed9126f4ccd95fd28
|
import av
import os
from collections import OrderedDict
import importlib
from .base import EncoderConfig
vencoders = OrderedDict()
aencoders = OrderedDict()
sencoders = OrderedDict()
for codec in sorted(av.codecs_available):
try:
c = av.codec.Codec(codec, "w")
except Exception:
pass
else:
if c.type == "video":
vencoders[codec] = c.long_name
if c.type == "audio":
aencoders[codec] = c.long_name
if c.type == "subtitle":
sencoders[codec] = c.long_name
def createConfigObj(codec):
if codec in encoders:
return encoders[codec]()
return EncoderConfig(codec)
encoders = {}
def scan():
_path = os.path.split(__file__)[0]
encoders.clear()
for _module in os.listdir(_path):
if _module[0] in "_." or _module in ("base.py",):
continue
if (os.path.isfile(os.path.join(_path, _module))
and _module.lower().endswith(".py")):
_module = importlib.import_module(f"{__name__}.{_module[:-3]}")
elif (os.path.isdir(os.path.join(_path, _module))
and os.path.isfile(os.path.join(_path, _module, "__init__.py"))):
_module = importlib.import_module(f"{__name__}.{_module}")
else:
continue
for _key in dir(_module):
_cls = getattr(_module, _key)
if (isinstance(_cls, type)
and issubclass(_cls, EncoderConfig)
and _cls not in (EncoderConfig,)
and hasattr(_cls, "codec")):
encoders[_cls.codec] = _cls
scan()
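if __name__ == "__main__":
    # Hedged illustration: print a few of the discovered video encoders and
    # build a config object for one of them. Which codec names appear depends
    # entirely on the local PyAV/FFmpeg build; "libx264" is only an example.
    for name, long_name in list(vencoders.items())[:5]:
        print(f"{name}: {long_name}")
    config = createConfigObj("libx264")
    print(type(config).__name__)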
|
py
|
1a5a0d2df04405f9994258dc211f57a01b2356b7
|
"""KeysightDAQ enables controlling various Keysight DAQs."""
from __future__ import print_function
from typing import List, Optional, Union
import time
from pyvisainstrument.VisaResource import VisaResource
class KeysightDAQ(VisaResource):
""" KeysightDAQ enables controlling various Keysight DAQs.
Args:
num_slots(int): Number of slots in DAQ
num_channels(int): Number of channels per slot
sc_format(str, optional): Slot+channel route format. Default is SCC
"""
def __init__(self, num_slots: int, num_channels: int, *args, sc_format: Optional[str] = None, **kwargs):
super().__init__(name='DAQ', *args, **kwargs)
self.num_slots = num_slots
self.num_channels = num_channels
self.ch_precision = sc_format.upper().count('C') if sc_format else 2
def is_channel_closed(self, channel: Union[int, str]):
""" Get if channel is closed.
Args:
channel (Union[int, str]): Channel with format SCC[C]
Returns:
bool: True if channel is closed
"""
return self.query(f'ROUT:CLOS? (@{channel})') == '1'
def is_channel_open(self, channel: Union[int, str]):
""" Get if channel is open.
Args:
channel (Union[int, str]): Channel with format SCC[C]
Returns:
bool: True if channel is open
"""
return not self.is_channel_closed(channel)
def open_all_channels(self, slot: Union[int, str], delay: float = 0):
""" Open all channels of a slot.
Args:
slot (Union[int, str]): Slot (1-based)
delay (float, optional): Delay after the open operation. Default is 0 - no delay
"""
slot_start = f'{slot}{1:0{self.ch_precision}d}'
slot_end = f'{slot}{self.num_channels:0{self.ch_precision}d}'
self.write(f'ROUT:OPEN (@{slot_start}:{slot_end})')
time.sleep(delay)
def close_all_channels(self, slot: Union[int, str], delay: float = 0):
""" Close all channels of a slot.
Args:
slot (Union[int, str]): Slot (1-based)
delay (float, optional):
Delay between each channel operation.
Default is 0 - no delay
"""
# NOTE: Will continue closing one at a time due to large current draw that may result
# if done concurrently.
for i in range(self.num_channels):
ch = f'{slot}{i+1:0{self.ch_precision}d}'
self.close_channel(ch, delay)
def open_channels(self, channels: List[Union[int, str]], delay: float = 0):
""" Open specified channels.
Args:
channels ([Union[int, str]]):
Channel indices with format SCC
delay (float, optional):
Delay between each channel operation.
Default is 0 - no delay
"""
for ch in channels:
self.open_channel(ch, delay)
def close_channels(self, channels: List[Union[int, str]], delay: float = 0):
""" Close specified channels.
Args:
channels ([Union[int, str]]):
Channel indices with format SCC
delay (float, optional):
Delay between each channel operation.
Default is 0 - no delay
"""
for ch in channels:
self.close_channel(ch, delay)
def open_channel(self, channel: Union[int, str], delay: float = 0):
""" Open specified channel.
Args:
channel (Union[int, str]):
Channel index with format SCCc
delay (float, optional):
Delay after channel operation.
Default is 0 - no delay
"""
self.write(f'ROUT:OPEN (@{channel})')
time.sleep(delay)
def close_channel(self, channel: Union[int, str], delay: float = 0):
""" Close specified channel.
Args:
channel (Union[int, str]):
Channel index with format SCC
delay (float, optional):
Delay after channel operation.
Default is 0 - no delay
"""
self.write(f'ROUT:CLOS (@{channel})')
time.sleep(delay)
def measure_temperature(self, probe: str, probe_type: str, resolution: Optional[str] = None):
""" Reset, configure, and measure temperature.
Args:
probe: {FRTD | RTD | FTHermistor | THERmistor | TCouple | DEF}
probe_type:
For FRTD and RTD: Type 85
For FTHermistor and THERmistor: Type 2252, 5000, and 10,000
For TCouple: Type B, E, J, K, N, R, S, and T
resolution: Default 1 PLC
Returns:
float: temperature (°C is default unit)
"""
return float(self.query(f'MEAS:TEMP? {probe},{probe_type}'))
def measure_relative_humidity(self, probe: str, probe_type: str, resolution: Optional[str] = None):
""" Reset, configure, and measure relative humidity.
NOTE: This is not a standard SCPI command for DAQs.
Args:
probe: {FRTD | RTD | FTHermistor | THERmistor | TCouple | DEF}
probe_type:
For FRTD and RTD: Type 85
For FTHermistor and THERmistor: Type 2252, 5000, and 10,000
For TCouple: Type B, E, J, K, N, R, S, and T
resolution: Default 1 PLC
Returns:
float: rel humidity (%)
"""
return float(self.query(f'MEAS:RHumidity? {probe},{probe_type}'))
def wait_for_completion(self, timeout: float = 2):
"""Wait for physical operation to complete.
Args:
timeout (float):
Max time to wait for completion in secs.
Raises:
Exception: if the timeout is reached before the route operation completes.
"""
done = False
wait_time = 0.0
while not done:
time.sleep(15E-3)
done_str = self.resource.query('ROUT:DONE?', delay=15E-3)
if isinstance(done_str, str) and done_str.strip().isnumeric():
done = int(done_str.strip())
wait_time += 15E-3
if wait_time >= timeout:
raise Exception('Timeout occurred waiting for route to finish.')
if __name__ == '__main__':
print('Started')
daq = KeysightDAQ(
bus_address='TCPIP::127.0.0.1::5020::SOCKET',
num_slots=3,
num_channels=20
)
daq.open(baud_rate=None, read_term='\n', write_term='\n')
daq.open_all_channels(1)
daq.open_channels([101, 103, 105])
daq.close_channels([101, 103, 105])
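    # Hedged illustration: one-shot temperature reading with a type-85 RTD,
    # following the probe/probe_type convention documented in
    # measure_temperature() above; the probe choice here is an example only.
    temperature_c = daq.measure_temperature('RTD', '85')
    print(f'Temperature: {temperature_c:.2f} degC')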
print("Finished")
|
py
|
1a5a0d480c9125bd3833303ba55328a20475781a
|
print '\033[0;31mHello Red!'
print '\033[0;32mHello Green!'
print '\033[0;30mHello Black!'
def edinici(x):
if x == 1:
return "odin"
elif x == 2:
return "Dva"
elif x == 3:
return "Tri"
elif x == 4:
return "Chetire"
elif x == 5:
return "Pyat'"
elif x == 6:
return "Shest'"
elif x == 7:
return "Sem''"
elif x == 8:
return "Vosem"
elif x == 9:
return "Devyat'"
else:
return ""
def desyatki(x):
if x == 10:
return "Desyat'"
elif x == 11:
return "Odinadzat'"
elif x == 12:
return "Dvenadzat'"
elif x == 13:
return "Trenadzat'"
elif x == 14:
return "Chetirnadzat'"
elif x == 15:
return "Pyatnadzat'"
elif x == 16:
return "Shestnadcat'"
elif x == 17:
return "Semnadzat'"
elif x == 18:
return "Vosemnadzat'"
elif x == 19:
return "Devyatnadzat'"
elif okruglenie(x) == 20:
return "Dvadzat' "
elif okruglenie(x) == 30:
return "Tridzat' "
elif okruglenie(x) == 40:
return "Sorok "
elif okruglenie(x) == 50:
return "Pyatdesyat "
elif okruglenie(x) == 60:
return "SHestdesyat "
elif okruglenie(x) == 70:
return "Semdesyat "
elif okruglenie(x) == 80:
return "Vosemdesyat "
elif okruglenie(x) == 90:
return "Devyanosto "
else:
return ""
def sotni(x):
if okruglenie(x) == 100:
return "sto "
elif okruglenie(x) == 200:
return "dvesti "
elif okruglenie(x) == 300:
return "trista "
elif okruglenie(x) == 400:
return "chetiresta "
elif okruglenie(x) == 500:
return "pyatsot "
elif okruglenie(x) == 600:
return "shestsot "
elif okruglenie(x) == 700:
return "semsot "
elif okruglenie(x) == 800:
return "vosemsot "
elif okruglenie(x) == 900:
return "devyatsot "
else:
return ""
def cifer_liter(x):
a = x % 1000
b = x % 100
c = x % 10
edin_i = ""
sotnya_i = sotni(a)
desyat_i = desyatki(b)
if b not in range(10, 20):
edin_i = edinici(c)
return sotnya_i + desyat_i + edin_i
x = 22
b = x % 100
def okruglenie(n):
if 999 < n < 10000:
n_ost = n % 1000
n = n - n_ost
elif 99 < n < 1000:
n_ost = n % 100
n = n - n_ost
elif 19 < n < 100:
n_ost = n % 10
n = n - n_ost
return n
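# Hedged illustration: two fixed spot-checks of cifer_liter, written in the
# same Python 2 style as the rest of this script.
print cifer_liter(22)   # expected: "Dvadzat' Dva"
print cifer_liter(315)  # expected: "trista Pyatnadzat'"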
print cifer_liter(int(raw_input("Enter number from 1 to 999: ")))
|
py
|
1a5a0e01d7254e5a7b9674d18c116e88136d417f
|
"""
Django settings for texashospital project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'orders.apps.OrdersConfig',
'users.apps.UsersConfig',
'crispy_forms',
#'django_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'texashospital.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'texashospital.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.sqlite3',
'ENGINE': "django.contrib.gis.db.backends.spatialite",
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Cache configuration
#CACHES = {
#"default": {
#"BACKEND": "django_redis.cache.RedisCache",
#"LOCATION": "redis://127.0.0.1:6379/2",
#"OPTIONS": {
#"CLIENT_CLASS": "django_redis.client.DefaultClient",
#}
#},
#"select2": {
#"BACKEND": "django_redis.cache.RedisCache",
#"LOCATION": "redis://127.0.0.1:6379/2",
#"OPTIONS": {
#"CLIENT_CLASS": "django_redis.client.DefaultClient",
#}
#}
#}
# Tell select2 which cache configuration to use
#SELECT2_CACHE_BACKEND = "select2"
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
#{
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
#},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
}
#{
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
#},
#{
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
#},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
CRISPY_TEMPLATE_PACK = "bootstrap4"
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_REDIRECT_URL = "orders-home"
LOGIN_URL = "login"
|
py
|
1a5a0e22fff884cb4a02d916ce61565afa28d785
|
import FWCore.ParameterSet.Config as cms
from RecoTracker.IterativeTracking.LowPtQuadStep_cff import *
from HIPixelTripletSeeds_cff import *
from HIPixel3PrimTracks_cfi import *
hiLowPtQuadStepClusters = cms.EDProducer("HITrackClusterRemover",
clusterLessSolution = cms.bool(True),
trajectories = cms.InputTag("hiGlobalPrimTracks"),
overrideTrkQuals = cms.InputTag('hiInitialStepSelector','hiInitialStep'),
TrackQuality = cms.string('highPurity'),
minNumberOfLayersWithMeasBeforeFiltering = cms.int32(0),
pixelClusters = cms.InputTag("siPixelClusters"),
stripClusters = cms.InputTag("siStripClusters"),
Common = cms.PSet(
maxChi2 = cms.double(9.0),
),
Strip = cms.PSet(
#Yen-Jie's mod to preserve merged clusters
maxSize = cms.uint32(2),
maxChi2 = cms.double(9.0)
)
)
# SEEDING LAYERS
# Using 4 layers layerlist
hiLowPtQuadStepSeedLayers = hiPixelLayerQuadruplets.clone()
hiLowPtQuadStepSeedLayers.BPix.skipClusters = cms.InputTag('hiLowPtQuadStepClusters')
hiLowPtQuadStepSeedLayers.FPix.skipClusters = cms.InputTag('hiLowPtQuadStepClusters')
# SEEDS
from RecoTracker.TkTrackingRegions.globalTrackingRegionWithVertices_cfi import globalTrackingRegionWithVertices as _globalTrackingRegionWithVertices
from RecoTracker.TkHitPairs.hitPairEDProducer_cfi import hitPairEDProducer as _hitPairEDProducer
from RecoPixelVertexing.PixelTriplets.pixelTripletHLTEDProducer_cfi import pixelTripletHLTEDProducer as _pixelTripletHLTEDProducer
from RecoPixelVertexing.PixelLowPtUtilities.ClusterShapeHitFilterESProducer_cfi import *
from RecoPixelVertexing.PixelLowPtUtilities.trackCleaner_cfi import *
from RecoPixelVertexing.PixelTrackFitting.pixelFitterByHelixProjections_cfi import *
from RecoHI.HiTracking.HIPixelTrackFilter_cff import *
from RecoHI.HiTracking.HITrackingRegionProducer_cfi import *
hiLowPtQuadStepTrackingRegions = _globalTrackingRegionWithVertices.clone(RegionPSet=dict(
precise = True,
useMultipleScattering = False,
useFakeVertices = False,
beamSpot = "offlineBeamSpot",
useFixedError = True,
nSigmaZ = 4.0,
sigmaZVertex = 4.0,
fixedError = 0.5,
VertexCollection = "hiSelectedPixelVertex",
ptMin = 0.3,#0.2 for pp
useFoundVertices = True,
originRadius = 0.02 #0.02 for pp
))
hiLowPtQuadStepTracksHitDoubletsCA = _hitPairEDProducer.clone(
clusterCheck = "",
seedingLayers = "hiLowPtQuadStepSeedLayers",
trackingRegions = "hiLowPtQuadStepTrackingRegions",
maxElement = 50000000,
produceIntermediateHitDoublets = True,
layerPairs = [0,1,2]
)
import RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi
from RecoPixelVertexing.PixelTriplets.caHitQuadrupletEDProducer_cfi import caHitQuadrupletEDProducer as _caHitQuadrupletEDProducer
hiLowPtQuadStepTracksHitQuadrupletsCA = _caHitQuadrupletEDProducer.clone(
doublets = "hiLowPtQuadStepTracksHitDoubletsCA",
extraHitRPhitolerance = 0.0,
SeedComparitorPSet = RecoPixelVertexing.PixelLowPtUtilities.LowPtClusterShapeSeedComparitor_cfi.LowPtClusterShapeSeedComparitor.clone(),
maxChi2 = dict(
pt1 = 0.7, pt2 = 2,
value1 = 1000, value2 = 150,
),
useBendingCorrection = True,
fitFastCircle = True,
fitFastCircleChi2Cut = True,
CAThetaCut = 0.0017,
CAPhiCut = 0.3,
)
hiLowPtQuadStepPixelTracksFilter = hiFilter.clone(
nSigmaTipMaxTolerance = 0,
lipMax = 1.0,
tipMax = 1.0,
ptMin = 0.4, #seeding region is 0.3
)
hiLowPtQuadStepPixelTracks = cms.EDProducer("PixelTrackProducer",
passLabel = cms.string('Pixel detached tracks with vertex constraint'),
# Ordered Hits
SeedingHitSets = cms.InputTag("hiLowPtQuadStepTracksHitQuadrupletsCA"),
# Fitter
Fitter = cms.InputTag("pixelFitterByHelixProjections"),
# Filter
Filter = cms.InputTag("hiLowPtQuadStepPixelTracksFilter"),
# Cleaner
Cleaner = cms.string("trackCleaner")
)
import RecoPixelVertexing.PixelLowPtUtilities.TrackSeeds_cfi
hiLowPtQuadStepSeeds = RecoPixelVertexing.PixelLowPtUtilities.TrackSeeds_cfi.pixelTrackSeeds.clone(
InputCollection = 'hiLowPtQuadStepPixelTracks'
)
# QUALITY CUTS DURING TRACK BUILDING
import TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff
hiLowPtQuadStepTrajectoryFilter = TrackingTools.TrajectoryFiltering.TrajectoryFilter_cff.CkfBaseTrajectoryFilter_block.clone(
#maxLostHits = 1,
minimumNumberOfHits = 3,#3 for pp
minPt = cms.double(0.075),# 0.075 for pp
#constantValueForLostHitsFractionFilter = cms.double(0.701)
)
import TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi
hiLowPtQuadStepChi2Est = TrackingTools.KalmanUpdators.Chi2MeasurementEstimator_cfi.Chi2MeasurementEstimator.clone(
ComponentName = cms.string('hiLowPtQuadStepChi2Est'),
nSigma = cms.double(3.0),
MaxChi2 = cms.double(9.0)
)
# TRACK BUILDING
import RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi
hiLowPtQuadStepTrajectoryBuilder = RecoTracker.CkfPattern.GroupedCkfTrajectoryBuilder_cfi.GroupedCkfTrajectoryBuilder.clone(
MeasurementTrackerName = '',
trajectoryFilter = cms.PSet(refToPSet_ = cms.string('hiLowPtQuadStepTrajectoryFilter')),
maxCand = 4,#4 for pp
estimator = cms.string('hiLowPtQuadStepChi2Est'),
maxDPhiForLooperReconstruction = cms.double(2.0),#2.0 for pp
# 0.63 GeV is the maximum pT for a charged particle to loop within the 1.1m radius
# of the outermost Tracker barrel layer (B=3.8T)
maxPtForLooperReconstruction = cms.double(0.7),# 0.7 for pp
alwaysUseInvalidHits = cms.bool(False)
)
# MAKING OF TRACK CANDIDATES
# Trajectory cleaner in default
import RecoTracker.CkfPattern.CkfTrackCandidates_cfi
hiLowPtQuadStepTrackCandidates = RecoTracker.CkfPattern.CkfTrackCandidates_cfi.ckfTrackCandidates.clone(
src = cms.InputTag('hiLowPtQuadStepSeeds'),
### these two parameters are relevant only for the CachingSeedCleanerBySharedInput
numHitsForSeedCleaner = cms.int32(50),
onlyPixelHitsForSeedCleaner = cms.bool(True),
TrajectoryBuilderPSet = cms.PSet(refToPSet_ = cms.string('hiLowPtQuadStepTrajectoryBuilder')),
TrajectoryBuilder = cms.string('hiLowPtQuadStepTrajectoryBuilder'),
clustersToSkip = cms.InputTag('hiLowPtQuadStepClusters'),
doSeedingRegionRebuilding = True,
useHitsSplitting = True
)
# TRACK FITTING
import RecoTracker.TrackProducer.TrackProducer_cfi
hiLowPtQuadStepTracks = RecoTracker.TrackProducer.TrackProducer_cfi.TrackProducer.clone(
src = 'hiLowPtQuadStepTrackCandidates',
AlgorithmName = cms.string('lowPtQuadStep'),
Fitter=cms.string('FlexibleKFFittingSmoother')
)
# Final selection
import RecoHI.HiTracking.hiMultiTrackSelector_cfi
hiLowPtQuadStepSelector = RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiMultiTrackSelector.clone(
src='hiLowPtQuadStepTracks',
useAnyMVA = cms.bool(True),
GBRForestLabel = cms.string('HIMVASelectorIter8'),#FIXME MVA for new iteration
GBRForestVars = cms.vstring(['chi2perdofperlayer', 'nhits', 'nlayers', 'eta']),
trackSelectors= cms.VPSet(
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiLooseMTS.clone(
name = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
), #end of pset
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
name = 'hiLowPtQuadStepTight',
preFilterName = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(True),
minMVA = cms.double(-0.2)
),
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
name = 'hiLowPtQuadStep',
preFilterName = 'hiLowPtQuadStepTight',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(True),
minMVA = cms.double(-0.09)
),
) #end of vpset
) #end of clone
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
trackingPhase1.toModify(hiLowPtQuadStepSelector, useAnyMVA = cms.bool(False))
trackingPhase1.toModify(hiLowPtQuadStepSelector, trackSelectors= cms.VPSet(
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiLooseMTS.clone(
name = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
), #end of pset
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiTightMTS.clone(
name = 'hiLowPtQuadStepTight',
preFilterName = 'hiLowPtQuadStepLoose',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
minMVA = cms.double(-0.2)
),
RecoHI.HiTracking.hiMultiTrackSelector_cfi.hiHighpurityMTS.clone(
name = 'hiLowPtQuadStep',
preFilterName = 'hiLowPtQuadStepTight',
applyAdaptedPVCuts = cms.bool(False),
useMVA = cms.bool(False),
minMVA = cms.double(-0.09)
),
) #end of vpset
)
import RecoTracker.FinalTrackSelectors.trackListMerger_cfi
hiLowPtQuadStepQual = RecoTracker.FinalTrackSelectors.trackListMerger_cfi.trackListMerger.clone(
TrackProducers=cms.VInputTag(cms.InputTag('hiLowPtQuadStepTracks')),
hasSelector=cms.vint32(1),
selectedTrackQuals = cms.VInputTag(cms.InputTag("hiLowPtQuadStepSelector","hiLowPtQuadStep")),
copyExtras = True,
makeReKeyedSeeds = cms.untracked.bool(False),
)
hiLowPtQuadStep = cms.Sequence(hiLowPtQuadStepClusters*
hiLowPtQuadStepSeedLayers*
hiLowPtQuadStepTrackingRegions*
hiLowPtQuadStepTracksHitDoubletsCA*
hiLowPtQuadStepTracksHitQuadrupletsCA*
pixelFitterByHelixProjections*
hiLowPtQuadStepPixelTracksFilter*
hiLowPtQuadStepPixelTracks*
hiLowPtQuadStepSeeds*
hiLowPtQuadStepTrackCandidates*
hiLowPtQuadStepTracks*
hiLowPtQuadStepSelector*
hiLowPtQuadStepQual)
|
py
|
1a5a0e5f9cced75edd7e59d00832167251da25b8
|
#!/usr/bin/python
# Copyright (c) 2003-2015 CORE Security Technologies
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# $Id$
#
# Description: DCE/RPC SAMR dumper.
#
# Author:
# Javier Kohen <[email protected]>
# Alberto Solino <[email protected]>
#
# Reference for:
# DCE/RPC for SAMR
import socket
import string
import sys
import types
import logging
from impacket import uuid, version
from impacket.nt_errors import STATUS_MORE_ENTRIES
from impacket.dcerpc.v5 import transport, samr
import argparse
class ListUsersException(Exception):
pass
class SAMRDump:
KNOWN_PROTOCOLS = {
'139/SMB': (r'ncacn_np:%s[\pipe\samr]', 139),
'445/SMB': (r'ncacn_np:%s[\pipe\samr]', 445),
}
def __init__(self, protocols = None,
username = '', password = '', domain = '', hashes = None, aesKey=None, doKerberos = False):
if not protocols:
self.__protocols = SAMRDump.KNOWN_PROTOCOLS.keys()
else:
self.__protocols = [protocols]
self.__username = username
self.__password = password
self.__domain = domain
self.__lmhash = ''
self.__nthash = ''
self.__aesKey = aesKey
self.__doKerberos = doKerberos
if hashes is not None:
self.__lmhash, self.__nthash = hashes.split(':')
def dump(self, addr):
"""Dumps the list of users and shares registered present at
addr. Addr is a valid host name or IP address.
"""
print 'Retrieving endpoint list from %s' % addr
# Try all requested protocols until one works.
entries = []
for protocol in self.__protocols:
protodef = SAMRDump.KNOWN_PROTOCOLS[protocol]
port = protodef[1]
print "Trying protocol %s..." % protocol
rpctransport = transport.SMBTransport(addr, port, r'\samr', self.__username, self.__password, self.__domain, self.__lmhash, self.__nthash, self.__aesKey, doKerberos = self.__doKerberos)
try:
entries = self.__fetchList(rpctransport)
except Exception, e:
print 'Protocol failed: %s' % e
else:
# Got a response. No need for further iterations.
break
# Display results.
for entry in entries:
(username, uid, user) = entry
base = "%s (%d)" % (username, uid)
print base + '/FullName:', user['FullName']
print base + '/UserComment:', user['UserComment']
print base + '/PrimaryGroupId:', user['PrimaryGroupId']
print base + '/BadPasswordCount:', user['BadPasswordCount']
print base + '/LogonCount:', user['LogonCount']
if entries:
num = len(entries)
if 1 == num:
print 'Received one entry.'
else:
print 'Received %d entries.' % num
else:
print 'No entries received.'
def __fetchList(self, rpctransport):
dce = rpctransport.get_dce_rpc()
entries = []
dce.connect()
dce.bind(samr.MSRPC_UUID_SAMR)
try:
resp = samr.hSamrConnect(dce)
serverHandle = resp['ServerHandle']
resp = samr.hSamrEnumerateDomainsInSamServer(dce, serverHandle)
domains = resp['Buffer']['Buffer']
print 'Found domain(s):'
for domain in domains:
print " . %s" % domain['Name']
print "Looking up users in domain %s" % domains[0]['Name']
resp = samr.hSamrLookupDomainInSamServer(dce, serverHandle,domains[0]['Name'] )
resp = samr.hSamrOpenDomain(dce, serverHandle = serverHandle, domainId = resp['DomainId'])
domainHandle = resp['DomainHandle']
done = False
status = STATUS_MORE_ENTRIES
enumerationContext = 0
while status == STATUS_MORE_ENTRIES:
try:
resp = samr.hSamrEnumerateUsersInDomain(dce, domainHandle, enumerationContext = enumerationContext)
except Exception, e:
if str(e).find('STATUS_MORE_ENTRIES') < 0:
raise
resp = e.get_packet()
for user in resp['Buffer']['Buffer']:
r = samr.hSamrOpenUser(dce, domainHandle, samr.USER_READ_GENERAL | samr.USER_READ_PREFERENCES | samr.USER_READ_ACCOUNT, user['RelativeId'])
print "Found user: %s, uid = %d" % (user['Name'], user['RelativeId'] )
info = samr.hSamrQueryInformationUser2(dce, r['UserHandle'],samr.USER_INFORMATION_CLASS.UserAllInformation)
entry = (user['Name'], user['RelativeId'], info['Buffer']['All'])
entries.append(entry)
samr.hSamrCloseHandle(dce, r['UserHandle'])
enumerationContext = resp['EnumerationContext']
status = resp['ErrorCode']
except ListUsersException, e:
print "Error listing users: %s" % e
dce.disconnect()
return entries
# Process command-line arguments.
if __name__ == '__main__':
print version.BANNER
parser = argparse.ArgumentParser(add_help = True, description = "This script downloads the list of users for the target system.")
parser.add_argument('target', action='store', help='[[domain/]username[:password]@]<targetName or address>')
parser.add_argument('protocol', choices=SAMRDump.KNOWN_PROTOCOLS.keys(), nargs='?', default='445/SMB', help='transport protocol (default 445/SMB)')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
group = parser.add_argument_group('authentication')
group.add_argument('-hashes', action="store", metavar = "LMHASH:NTHASH", help='NTLM hashes, format is LMHASH:NTHASH')
group.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
group.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file (KRB5CCNAME) based on target parameters. If valid credentials cannot be found, it will use the ones specified in the command line')
group.add_argument('-aesKey', action="store", metavar = "hex key", help='AES key to use for Kerberos Authentication (128 or 256 bits)')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
options = parser.parse_args()
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
import re
domain, username, password, address = re.compile('(?:(?:([^/@:]*)/)?([^@:]*)(?::([^@]*))?@)?(.*)').match(options.target).groups('')
if domain is None:
domain = ''
if options.aesKey is not None:
options.k = True
if password == '' and username != '' and options.hashes is None and options.no_pass is False and options.aesKey is None:
from getpass import getpass
password = getpass("Password:")
dumper = SAMRDump(options.protocol, username, password, domain, options.hashes, options.aesKey, options.k)
dumper.dump(address)
|
py
|
1a5a0f7876124a3d6e032e28d6247799e81c8461
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""A `Network` is way to compose layers: the topological form of a `Model`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
import json
import os
from six.moves import zip # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras import saving
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.io_utils import ask_to_proceed_with_overwrite
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import layer_utils as trackable_layer_utils
from tensorflow.python.training.tracking import util as trackable_utils
from tensorflow.python.util import nest
from tensorflow.python.util import serialization
from tensorflow.python.util import tf_inspect
# pylint: disable=g-import-not-at-top
try:
import h5py
except ImportError:
h5py = None
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
class Network(base_layer.Layer):
"""A `Network` is a composition of layers.
`Network` is the topological form of a "model". A `Model`
is simply a `Network` with added training routines.
Two types of `Networks` exist: Graph Networks and Subclass Networks. Graph
networks are used in the Keras Functional and Sequential APIs. Subclassed
networks are used when a user subclasses the `Model` class. In general,
more Keras features are supported with Graph Networks than with Subclassed
Networks, specifically:
- Model cloning (`keras.models.clone`)
- Serialization (`model.get_config()/from_config`, `model.to_json()/to_yaml()`)
- Whole-model saving (`model.save()`)
A Graph Network can be instantiated by passing two arguments to `__init__`.
The first argument is the `keras.Input` Tensors that represent the inputs
to the Network. The second argument specifies the output Tensors that
represent the outputs of this Network. Both arguments can be a nested
structure of Tensors.
Example:
```
inputs = {'x1': keras.Input(shape=(10,)), 'x2': keras.Input(shape=(1,))}
t = keras.layers.Dense(1, activation='relu')(inputs['x1'])
outputs = keras.layers.Add()([t, inputs['x2']])
network = Network(inputs, outputs)
```
A Graph Network constructed using the Functional API can also include raw
TensorFlow functions, with the exception of functions that create Variables
or assign ops.
Example:
```
inputs = keras.Input(shape=(10,))
x = keras.layers.Dense(1)(inputs)
outputs = tf.nn.relu(x)
network = Network(inputs, outputs)
```
Subclassed Networks can be instantiated via `name` and (optional) `dynamic`
keyword arguments. Subclassed Networks keep track of their Layers, and their
`call` method can be overridden. Subclassed Networks are typically created
indirectly, by subclassing the `Model` class.
Example:
```
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(name='my_model', dynamic=False)
self.layer1 = keras.layers.Dense(10, activation='relu')
def call(self, inputs):
return self.layer1(inputs)
```
"""
# See tf.Module for the usage of this property.
# The key of _layer_call_argspecs is a layer. tf.Module._flatten will fail to
# flatten the key since it is trying to convert Trackable/Layer to a string.
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
('_layer_call_argspecs',),
base_layer.Layer._TF_MODULE_IGNORED_PROPERTIES
))
def __init__(self, *args, **kwargs): # pylint: disable=super-init-not-called
# Signature detection
if (len(args) == 2 or
len(args) == 1 and 'outputs' in kwargs or
'inputs' in kwargs and 'outputs' in kwargs):
# Graph network
self._init_graph_network(*args, **kwargs)
else:
# Subclassed network
self._init_subclassed_network(**kwargs)
tf_utils.assert_no_legacy_layers(self.layers)
# Several Network methods have "no_automatic_dependency_tracking"
# annotations. Since Network does automatic dependency tracking on attribute
# assignment, including for common data structures such as lists, by default
# we'd have quite a few empty dependencies which users don't care about (or
# would need some way to ignore dependencies automatically, which is confusing
# when applied to user code). Some attributes, such as _layers, would cause
# structural issues (_layers being the place where Layers assigned to tracked
# attributes are stored).
#
# Aside from these aesthetic and structural issues, useless dependencies on
# empty lists shouldn't cause issues; adding or removing them will not break
# checkpoints, but may cause "all Python objects matched" assertions to fail
# (in which case less strict assertions may be substituted if necessary).
@trackable.no_automatic_dependency_tracking
def _base_init(self, name=None):
# The following are implemented as property functions:
# self.trainable_weights
# self.non_trainable_weights
# self.input_spec
# self.losses
# self.updates
self._init_set_name(name, zero_based=True)
self._activity_regularizer = None
# This acts just like the `trainable` attribute of any layer instance.
# It does not affect users of the underlying layers, only users of the
# Network instance.
self.trainable = True
self._is_compiled = False
self._expects_training_arg = False
# This is True for Sequential networks and Functional networks.
self._compute_output_and_mask_jointly = False
self.supports_masking = False
if not hasattr(self, 'optimizer'):
# Don't reset optimizer if already set.
self.optimizer = None
# Private attributes to implement compatibility with Layer.
self._maybe_create_attribute('_trainable_weights', [])
self._maybe_create_attribute('_non_trainable_weights', [])
self._updates = [] # Used in symbolic mode only.
self._losses = []
self._eager_losses = []
self._callable_losses = []
# A list of metric instances corresponding to the symbolic metric tensors
# added using the `add_metric` API.
self._metrics = []
# A dictionary that maps metric names to metric result tensors.
self._metrics_tensors = {}
self._scope = None # Never used.
self._reuse = None # Never used.
if context.executing_eagerly():
self._graph = None
else:
self._graph = ops.get_default_graph() # Used in symbolic mode only.
# A Network does not create weights of its own, thus has no dtype.
self._dtype = None
# All layers in order of horizontal graph traversal.
# Entries are unique. Includes input and output layers.
self._maybe_create_attribute('_layers', [])
# Used in symbolic mode only, only in conjunction with graph-networks
self._outbound_nodes = []
self._inbound_nodes = []
self._trackable_saver = (
trackable_utils.saver_with_op_caching(self))
# Networks do not need to do any casting of inputs or variables, because
# each of its layers will handle casting through the layer's own
# implementation. Therefore networks use the 'infer' policy, which does no
# casting.
self._mixed_precision_policy = policy.Policy('infer')
@trackable.no_automatic_dependency_tracking
def _init_graph_network(self, inputs, outputs, name=None):
self._call_convention = (base_layer_utils
.CallConvention.EXPLICIT_INPUTS_ARGUMENT)
# Normalize and set self.inputs, self.outputs.
if isinstance(inputs, list) and len(nest.flatten(inputs)) == 1:
inputs = inputs[0]
if isinstance(outputs, list) and len(nest.flatten(outputs)) == 1:
outputs = outputs[0]
self._nested_outputs = outputs
self._nested_inputs = inputs
self.inputs = nest.flatten(inputs)
self.outputs = nest.flatten(outputs)
if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
base_layer_utils.create_keras_history(self._nested_outputs)
self._base_init(name=name)
self._validate_graph_inputs_and_outputs()
self._compute_previous_mask = (
'mask' in tf_inspect.getfullargspec(self.call).args or
hasattr(self, 'compute_mask'))
# A Network does not create weights of its own, thus it is already
# built.
self.built = True
self._compute_output_and_mask_jointly = True
self._is_graph_network = True
self._dynamic = False
# `_expects_training_arg` is True since the `training` argument is always
# present in the signature of the `call` method of a graph network.
self._expects_training_arg = True
self._input_layers = []
self._output_layers = []
self._input_coordinates = []
self._output_coordinates = []
# This is for performance optimization when calling the Network on new
# inputs. Every time the Network is called on a set on input tensors,
# we compute the output tensors, output masks and output shapes in one pass,
# then cache them here. When any of these outputs is queried later, we
# retrieve it from there instead of recomputing it.
self._output_mask_cache = {}
self._output_tensor_cache = {}
self._output_shape_cache = {}
# Build self._output_layers:
for x in self.outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
self._output_layers.append(layer)
self._output_coordinates.append((layer, node_index, tensor_index))
# Build self._input_layers:
for x in self.inputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
# It's supposed to be an input layer, so only one node
# and one tensor output.
assert node_index == 0
assert tensor_index == 0
self._input_layers.append(layer)
self._input_coordinates.append((layer, node_index, tensor_index))
# Keep track of the network's nodes and layers.
nodes, nodes_by_depth, layers, layers_by_depth = _map_graph_network(
self.inputs, self.outputs)
self._network_nodes = nodes
self._nodes_by_depth = nodes_by_depth
self._layers = layers
self._layers_by_depth = layers_by_depth
self._layer_call_argspecs = {}
for layer in self._layers:
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
self._track_layers(layers)
# Create the node linking internal inputs to internal outputs.
base_layer.Node(
outbound_layer=self,
inbound_layers=[],
node_indices=[],
tensor_indices=[],
input_tensors=self._nested_inputs,
output_tensors=self._nested_outputs)
# Build self.input_names and self.output_names.
self._set_output_names()
self.input_names = []
self._feed_input_names = []
self._feed_inputs = []
self._feed_input_shapes = []
for i, layer in enumerate(self._input_layers):
self.input_names.append(layer.name)
if layer.is_placeholder:
self._feed_input_names.append(layer.name)
self._feed_input_shapes.append(backend.int_shape(self.inputs[i]))
self._feed_inputs.append(layer.input)
def _set_output_names(self):
"""Assigns unique names to the Network's outputs.
Output layers with multiple output tensors would otherwise lead to duplicate
names in self.output_names.
"""
uniquified = []
output_names = set()
prefix_count = {}
for layer in self._output_layers:
proposal = layer.name
while proposal in output_names:
existing_count = prefix_count.get(layer.name, 1)
proposal = '{}_{}'.format(layer.name, existing_count)
prefix_count[layer.name] = existing_count + 1
output_names.add(proposal)
uniquified.append(proposal)
self.output_names = uniquified
@trackable.no_automatic_dependency_tracking
def _init_subclassed_network(self, name=None, dynamic=False):
self._base_init(name=name)
self._is_graph_network = False
self._dynamic = dynamic
call_argspec = tf_inspect.getfullargspec(self.call)
if 'training' in call_argspec.args:
self._expects_training_arg = True
else:
self._expects_training_arg = False
self._call_convention = self._determine_call_convention(call_argspec)
self.outputs = []
self.inputs = []
self.built = False
@property
def dynamic(self):
if self._is_graph_network:
return any(layer.dynamic for layer in self.layers)
return self._dynamic or any(layer.dynamic for layer in self.layers)
def _determine_call_convention(self, call_argspec):
"""Decides how `self.call()` is invoked. See `CallConvention`."""
if call_argspec.varargs:
may_take_single_argument = False
else:
try:
# Note: tf_inspect doesn't raise a TypeError when regular inspect would,
# so we need to keep in mind that "getcallargs" may have returned
# something even though we under-specified positional arguments.
all_args = tf_inspect.getcallargs(self.call, None)
self_args = set()
for arg_name, obj in all_args.items():
if obj is self:
self_args.add(arg_name)
may_take_single_argument = True
except TypeError:
may_take_single_argument = False
if may_take_single_argument:
# A single positional argument (plus "self") is considered equivalent to
# an "inputs" argument.
all_positional_args = len(call_argspec.args)
if call_argspec.defaults is not None:
all_positional_args -= len(call_argspec.defaults)
non_self_positional_args = all_positional_args
for positional_arg_name in call_argspec.args[:all_positional_args]:
if positional_arg_name in self_args:
non_self_positional_args -= 1
if non_self_positional_args == 1:
if 'inputs' in call_argspec.args[all_positional_args:]:
raise TypeError(
"Model.call() takes a single positional argument (to which "
"inputs are passed by convention) and a separate 'inputs' "
"argument. Unable to determine which arguments are inputs.")
return base_layer_utils.CallConvention.SINGLE_POSITIONAL_ARGUMENT
if 'inputs' in call_argspec.args:
return base_layer_utils.CallConvention.EXPLICIT_INPUTS_ARGUMENT
else:
return base_layer_utils.CallConvention.POSITIONAL_ARGUMENTS_ARE_INPUTS
def _track_layers(self, layers):
"""Add Trackable dependencies on a list of Layers."""
weight_layer_index = 0
for layer_index, layer in enumerate(layers):
try:
if layer.weights:
# Keep a separate index for layers which have weights. This allows
# users to insert Layers without weights anywhere in the network
# without breaking checkpoints.
self._track_trackable(
layer, name='layer_with_weights-%d' % weight_layer_index,
overwrite=True)
weight_layer_index += 1
except ValueError:
# The layer might have weights, but may not be built yet. We just treat
# it as a layer without weights.
pass
# Even if it doesn't have weights, we should still track everything in
# case it has/will have Trackable dependencies.
self._track_trackable(
layer, name='layer-%d' % layer_index, overwrite=True)
def __setattr__(self, name, value):
if not getattr(self, '_self_setattr_tracking', True):
super(Network, self).__setattr__(name, value)
return
if all(
isinstance(v, (base_layer.Layer,
data_structures.TrackableDataStructure)) or
trackable_layer_utils.has_weights(v) for v in nest.flatten(value)):
try:
self._is_graph_network
except AttributeError:
raise RuntimeError('It looks like you are subclassing `Model` and you '
'forgot to call `super(YourClass, self).__init__()`.'
' Always start with this line.')
super(Network, self).__setattr__(name, value)
# Keep track of metric instance created in subclassed model/layer.
# We do this so that we can maintain the correct order of metrics by adding
# the instance to the `metrics` list as soon as it is created.
from tensorflow.python.keras import metrics as metrics_module # pylint: disable=g-import-not-at-top
if isinstance(value, metrics_module.Metric):
self._metrics.append(value)
@property
def stateful(self):
return any((hasattr(layer, 'stateful') and layer.stateful)
for layer in self.layers)
def reset_states(self):
for layer in self.layers:
if hasattr(layer, 'reset_states') and getattr(layer, 'stateful', False):
layer.reset_states()
@property
def state_updates(self):
"""Returns the `updates` from all layers that are stateful.
This is useful for separating training updates and
state updates, e.g. when we need to update a layer's internal state
during prediction.
Returns:
A list of update ops.
"""
state_updates = []
for layer in self.layers:
if getattr(layer, 'stateful', False):
if hasattr(layer, 'updates'):
state_updates += layer.updates
return state_updates
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
self._assert_weights_created()
weights = []
for layer in self._layers:
weights += layer.weights
weights += (self._trainable_weights + self._non_trainable_weights)
return weights
def compute_mask(self, inputs, mask):
if not self._is_graph_network:
return None
# TODO(omalleyt): b/123540974 This function is not really safe to call
# by itself because it will duplicate any updates and losses in graph
# mode by `call`ing the Layers again.
output_tensors = self._run_internal_graph(inputs, mask=mask)
return nest.map_structure(lambda t: t._keras_mask, output_tensors)
@property
def layers(self):
return trackable_layer_utils.filter_empty_layer_containers(
self._layers)
def get_layer(self, name=None, index=None):
"""Retrieves a layer based on either its name (unique) or index.
If `name` and `index` are both provided, `index` will take precedence.
Indices are based on order of horizontal graph traversal (bottom-up).
Arguments:
name: String, name of layer.
index: Integer, index of layer.
Returns:
A layer instance.
Raises:
ValueError: In case of invalid layer name or index.
"""
# TODO(fchollet): We could build a dictionary based on layer names
# since they are constant, but we have not done that yet.
if index is not None:
if len(self.layers) <= index:
raise ValueError('Was asked to retrieve layer at index ' + str(index) +
' but model only has ' + str(len(self.layers)) +
' layers.')
else:
return self.layers[index]
else:
if not name:
raise ValueError('Provide either a layer name or layer index.')
for layer in self.layers:
if layer.name == name:
return layer
raise ValueError('No such layer: ' + name)
@trackable.no_automatic_dependency_tracking
def _clear_losses(self):
"""Used every step in eager to reset losses."""
self._eager_losses = []
for layer in self.layers:
layer._clear_losses()
@property
def trainable_weights(self):
self._assert_weights_created()
return trackable_layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._trainable_weights)
@property
def non_trainable_weights(self):
self._assert_weights_created()
return trackable_layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._non_trainable_weights + self._trainable_weights)
@property
def _all_metrics_tensors(self):
"""Returns the network's symbolic metric tensors."""
# TODO(psv): Remove this property.
metrics_tensors = {}
for layer in self.layers:
if isinstance(layer, Network):
metrics_tensors.update(layer._all_metrics_tensors)
else:
metrics_tensors.update(layer._metrics_tensors)
metrics_tensors.update(self._metrics_tensors)
return metrics_tensors
@property
def input_spec(self):
"""Gets the network's input specs.
Returns:
A list of `InputSpec` instances (one per input to the model)
or a single instance if the model has only one input.
"""
# If subclassed model, can't assume anything.
if not self._is_graph_network:
return None
specs = []
for layer in self._input_layers:
if layer.input_spec is None:
specs.append(None)
else:
if not isinstance(layer.input_spec, list):
raise TypeError('Layer ' + layer.name +
' has an input_spec attribute that '
'is not a list. We expect a list. '
'Found input_spec = ' + str(layer.input_spec))
specs += layer.input_spec
if len(specs) == 1:
return specs[0]
return specs
@base_layer.default
def build(self, input_shape):
"""Builds the model based on input shapes received.
This is to be used for subclassed models, which do not know at instantiation
time what their inputs look like.
This method only exists for users who want to call `model.build()` in a
standalone way (as a substitute for calling the model on real data to
build it). It will never be called by the framework (and thus it will
never throw unexpected errors in an unrelated workflow).
Args:
input_shape: Single tuple, TensorShape, or list of shapes, where shapes
are tuples, integers, or TensorShapes.
Raises:
ValueError:
1. In case of invalid user-provided data (not of type tuple,
list, or TensorShape).
2. If the model requires call arguments that are agnostic
to the input shapes (positional or kwarg in call signature).
3. If not all layers were properly built.
4. If float type inputs are not supported within the layers.
In each of these cases, the user should build their model by calling it
on real tensor data.
"""
if self._is_graph_network:
self.built = True
return
# If subclass network
if input_shape is None:
raise ValueError('Input shape must be defined when calling build on a '
'model subclass network.')
valid_types = (tuple, list, tensor_shape.TensorShape)
if not isinstance(input_shape, valid_types):
raise ValueError('Specified input shape is not one of the valid types. '
'Please specify a batch input shape of type tuple or '
'list of input shapes. User provided '
'input type: {}'.format(type(input_shape)))
if input_shape and not self.inputs:
# We create placeholders for the `None`s in the shape and build the model
# in a Graph. Since tf.Variable is compatible with both eager execution
# and graph building, the variables created after building the model in
# a Graph are still valid when executing eagerly.
if context.executing_eagerly():
graph = func_graph.FuncGraph('build_graph')
else:
graph = backend.get_graph()
with graph.as_default():
if isinstance(input_shape, list):
x = [base_layer_utils.generate_placeholders_from_shape(shape)
for shape in input_shape]
else:
x = base_layer_utils.generate_placeholders_from_shape(input_shape)
kwargs = {}
call_signature = tf_inspect.getfullargspec(self.call)
call_args = call_signature.args
# Exclude `self`, `inputs`, and any argument with a default value.
if len(call_args) > 2:
if call_signature.defaults:
call_args = call_args[2:-len(call_signature.defaults)]
else:
call_args = call_args[2:]
for arg in call_args:
if arg == 'training':
# Case where `training` is a positional arg with no default.
kwargs['training'] = False
else:
# Has invalid call signature with unknown positional arguments.
raise ValueError(
'Currently, you cannot build your model if it has '
'positional or keyword arguments that are not '
'inputs to the model, but are required for its '
'`call` method. Instead, in order to instantiate '
'and build your model, `call` your model on real '
'tensor data with all expected call arguments.')
elif len(call_args) < 2:
# Signature without `inputs`.
raise ValueError('You can only call `build` on a model if its `call` '
'method accepts an `inputs` argument.')
try:
self.call(x, **kwargs)
except (errors.InvalidArgumentError, TypeError):
raise ValueError('You cannot build your model by calling `build` '
'if your layers do not support float type inputs. '
'Instead, in order to instantiate and build your '
'model, `call` your model on real tensor data (of '
'the correct dtype).')
if self._layers:
self._track_layers(self._layers)
self.built = True
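# Illustrative usage sketch (assumes a hypothetical subclassed model named
# `MySubclassedModel` whose `call` takes a single `inputs` tensor of shape
# (batch, 16)); it shows the standalone-`build` path documented above:
#
#   model = MySubclassedModel()
#   model.build(input_shape=(None, 16))   # variables are created from the shape alone
#   model.summary()                       # valid now that the model is built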
def call(self, inputs, training=None, mask=None):
"""Calls the model on new inputs.
In this case `call` just reapplies
all ops in the graph to the new inputs
(e.g. build a new computational graph from the provided inputs).
Arguments:
inputs: A tensor or list of tensors.
training: Boolean or boolean scalar tensor, indicating whether to run
the `Network` in training mode or inference mode.
mask: A mask or list of masks. A mask can be
either a tensor or None (no mask).
Returns:
A tensor if there is a single output, or
a list of tensors if there are more than one outputs.
"""
if not self._is_graph_network:
raise NotImplementedError('When subclassing the `Model` class, you should'
' implement a `call` method.')
return self._run_internal_graph(inputs, training=training, mask=mask)
def compute_output_shape(self, input_shape):
if not self._is_graph_network:
return super(Network, self).compute_output_shape(input_shape)
# Convert any shapes in tuple format to TensorShapes.
input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)
if len(nest.flatten(input_shape)) != len(nest.flatten(self._input_layers)):
raise ValueError('Invalid input_shape argument ' + str(input_shape) +
': model has ' + str(len(self._input_layers)) +
' tensor inputs.')
cache_key = generic_utils.object_list_uid(input_shape)
if cache_key in self._output_shape_cache:
# Cache hit. Return shapes as TensorShapes.
return self._output_shape_cache[cache_key]
layers_to_output_shapes = {}
for layer, shape in zip(self._input_layers, nest.flatten(input_shape)):
# It's an input layer: `compute_output_shape` is the identity,
# and there is only one node and one tensor.
shape_key = layer.name + '_0_0'
layers_to_output_shapes[shape_key] = shape
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Iterate over nodes, by depth level.
if len(depth_keys) > 1:
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if layer in self._input_layers:
# We've already covered the input layers
# a few lines above.
continue
# Potentially redundant list,
# same size as node.input_tensors.
layer_input_shapes = []
for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():
input_layer_key = inbound_layer.name + '_%s_%s' % (node_id,
tensor_id)
layer_input_shapes.append(layers_to_output_shapes[input_layer_key])
layer_input_shapes = nest.pack_sequence_as(node.inbound_layers,
layer_input_shapes)
# Layers expect shapes to be tuples for `compute_output_shape`.
layer_input_shapes = tf_utils.convert_shapes(
layer_input_shapes, to_tuples=True)
layer_output_shapes = layer.compute_output_shape(layer_input_shapes)
# Convert back to TensorShapes.
layer_output_shapes = tf_utils.convert_shapes(
layer_output_shapes, to_tuples=False)
node_index = layer._inbound_nodes.index(node) # pylint: disable=protected-access
for j, shape in enumerate(nest.flatten(layer_output_shapes)):
shape_key = layer.name + '_%s_%s' % (node_index, j)
layers_to_output_shapes[shape_key] = shape
# Read final output shapes from layers_to_output_shapes.
output_shapes = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
shape_key = layer.name + '_%s_%s' % (node_index, tensor_index)
output_shapes.append(layers_to_output_shapes[shape_key])
output_shapes = nest.pack_sequence_as(self._nested_outputs, output_shapes)
# Store in cache.
self._output_shape_cache[cache_key] = output_shapes
# Return shapes as TensorShapes.
return output_shapes
def _run_internal_graph(self, inputs, training=None, mask=None):
"""Computes output tensors for new inputs.
# Note:
- Expects `inputs` to be a list (potentially with 1 element).
- Can be run on non-Keras tensors.
Arguments:
inputs: Tensor or nested structure of Tensors.
training: Boolean learning phase.
mask: (Optional) Tensor or nested structure of Tensors.
Returns:
A nested structure of output tensors, matching the structure of the network's outputs.
"""
# Note: masking support is relevant mainly for Keras.
# It cannot be factored out without having to fully reimplement the network
# calling logic on the Keras side. We choose to incorporate it in
# Network because 1) it may be useful to fully support in tf.layers in
# the future and 2) Keras is a major user of Network. If you don't
# use masking, it does not interfere with regular behavior at all and you
# can ignore it.
inputs = nest.flatten(inputs)
if mask is None:
masks = [None for _ in range(len(inputs))]
else:
masks = nest.flatten(mask)
for input_t, mask in zip(inputs, masks):
input_t._keras_mask = mask
# Dictionary mapping reference tensors to computed tensors.
tensor_dict = {}
for x, y, mask in zip(self.inputs, inputs, masks):
tensor_dict[str(id(x))] = y
depth_keys = list(self._nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Ignore the InputLayers when computing the graph.
depth_keys = depth_keys[1:]
for depth in depth_keys:
nodes = self._nodes_by_depth[depth]
for node in nodes:
# This is always a single layer, never a list.
layer = node.outbound_layer
if all(
str(id(tensor)) in tensor_dict
for tensor in nest.flatten(node.input_tensors)):
# Call layer (reapplying ops to new inputs).
computed_tensors = nest.map_structure(
lambda t: tensor_dict[str(id(t))], node.input_tensors)
# Ensure `training` and `mask` arg propagation if applicable.
kwargs = node.arguments or {}
argspec = self._layer_call_argspecs[layer].args
if 'training' in argspec:
kwargs.setdefault('training', training)
if 'mask' in argspec:
computed_masks = nest.map_structure(
lambda t: getattr(t, '_keras_mask', None),
computed_tensors)
kwargs.setdefault('mask', computed_masks)
# Compute outputs.
output_tensors = layer(computed_tensors, **kwargs)
# Update tensor_dict.
for x, y in zip(
nest.flatten(node.output_tensors), nest.flatten(output_tensors)):
tensor_dict[str(id(x))] = y
output_tensors = []
output_shapes = []
for x in self.outputs:
assert str(id(x)) in tensor_dict, 'Could not compute output ' + str(x)
tensor = tensor_dict[str(id(x))]
output_shapes.append(x.shape)
output_tensors.append(tensor)
if output_shapes is not None:
input_shapes = [x.shape for x in inputs]
cache_key = generic_utils.object_list_uid(input_shapes)
self._output_shape_cache[cache_key] = nest.pack_sequence_as(
self._nested_outputs, output_shapes)
output_tensors = nest.pack_sequence_as(self._nested_outputs, output_tensors)
return output_tensors
def get_config(self):
if not self._is_graph_network:
raise NotImplementedError
config = {
'name': self.name,
}
node_conversion_map = {}
for layer in self.layers:
if issubclass(layer.__class__, Network):
# Networks start with a pre-existing node
# linking their input to output.
kept_nodes = 1
else:
kept_nodes = 0
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in self._network_nodes:
node_conversion_map[node_key] = kept_nodes
kept_nodes += 1
layer_configs = []
for layer in self.layers: # From the earliest layers on.
layer_class_name = layer.__class__.__name__
layer_config = layer.get_config()
filtered_inbound_nodes = []
for original_node_index, node in enumerate(layer._inbound_nodes):
node_key = _make_node_key(layer.name, original_node_index)
if node_key in self._network_nodes:
# The node is relevant to the model:
# add to filtered_inbound_nodes.
if node.arguments:
try:
json.dumps(node.arguments)
kwargs = node.arguments
except TypeError:
logging.warning(
'Layer ' + layer.name +
' was passed non-serializable keyword arguments: ' +
str(node.arguments) + '. They will not be included '
'in the serialized model (and thus will be missing '
'at deserialization time).')
kwargs = {}
else:
kwargs = {}
if node.inbound_layers:
node_data = []
for inbound_layer, node_id, tensor_id, _ in node.iterate_inbound():
node_key = _make_node_key(inbound_layer.name, node_id)
new_node_index = node_conversion_map.get(node_key, 0)
node_data.append(
tf_utils.ListWrapper(
[inbound_layer.name, new_node_index, tensor_id, kwargs]))
node_data = nest.pack_sequence_as(node.input_tensors, node_data)
if not nest.is_sequence(node_data):
node_data = [node_data]
# Convert ListWrapper to list for backwards compatible configs.
node_data = tf_utils.convert_inner_node_data(node_data)
filtered_inbound_nodes.append(node_data)
layer_configs.append({
'name': layer.name,
'class_name': layer_class_name,
'config': layer_config,
'inbound_nodes': filtered_inbound_nodes,
})
config['layers'] = layer_configs
# Gather info about inputs and outputs.
model_inputs = []
for i in range(len(self._input_layers)):
layer, node_index, tensor_index = self._input_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in self._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_inputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_inputs = nest.pack_sequence_as(self._nested_inputs, model_inputs)
# Preserve external Keras compat for Models with single input.
if not nest.is_sequence(model_inputs):
model_inputs = [model_inputs]
model_inputs = tf_utils.convert_inner_node_data(model_inputs)
config['input_layers'] = model_inputs
model_outputs = []
for i in range(len(self._output_layers)):
layer, node_index, tensor_index = self._output_coordinates[i]
node_key = _make_node_key(layer.name, node_index)
if node_key not in self._network_nodes:
continue
new_node_index = node_conversion_map[node_key]
model_outputs.append(
tf_utils.ListWrapper([layer.name, new_node_index, tensor_index]))
model_outputs = nest.pack_sequence_as(self._nested_outputs, model_outputs)
# Preserve external Keras compat for Models with single output.
if not nest.is_sequence(model_outputs):
model_outputs = [model_outputs]
model_outputs = tf_utils.convert_inner_node_data(model_outputs)
config['output_layers'] = model_outputs
return copy.deepcopy(config)
@classmethod
def from_config(cls, config, custom_objects=None):
"""Instantiates a Model from its config (output of `get_config()`).
Arguments:
config: Model config dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A model instance.
Raises:
ValueError: In case of improperly formatted config dict.
"""
# Layer instances created during
# the graph reconstruction process
created_layers = {}
# Dictionary mapping layer instances to
# node data that specifies a layer call.
# It acts as a queue that maintains any unprocessed
# layer call until it becomes possible to process it
# (i.e. until the input tensors to the call all exist).
unprocessed_nodes = {}
def add_unprocessed_node(layer, node_data):
if layer not in unprocessed_nodes:
unprocessed_nodes[layer] = [node_data]
else:
unprocessed_nodes[layer].append(node_data)
def process_node(layer, node_data):
"""Deserialize a node.
Arguments:
layer: layer instance.
node_data: Nested structure of `ListWrapper`.
Raises:
ValueError: In case of improperly formatted `node_data`.
"""
input_tensors = []
for input_data in nest.flatten(node_data):
input_data = input_data.as_list()
inbound_layer_name = input_data[0]
inbound_node_index = input_data[1]
inbound_tensor_index = input_data[2]
if len(input_data) == 3:
kwargs = {}
elif len(input_data) == 4:
kwargs = input_data[3]
else:
raise ValueError('Improperly formatted model config.')
inbound_layer = created_layers[inbound_layer_name]
if len(inbound_layer._inbound_nodes) <= inbound_node_index:
add_unprocessed_node(layer, node_data)
return
inbound_node = inbound_layer._inbound_nodes[inbound_node_index]
input_tensors.append(
nest.flatten(inbound_node.output_tensors)[inbound_tensor_index])
input_tensors = nest.pack_sequence_as(node_data, input_tensors)
# Call layer on its inputs, thus creating the node
# and building the layer if needed.
if input_tensors is not None:
# Preserve compatibility with older configs.
flat_input_tensors = nest.flatten(input_tensors)
if len(flat_input_tensors) == 1:
layer(flat_input_tensors[0], **kwargs)
else:
layer(input_tensors, **kwargs)
def process_layer(layer_data):
"""Deserializes a layer, then call it on appropriate inputs.
Arguments:
layer_data: layer config dict.
Raises:
ValueError: In case of improperly formatted `layer_data` dict.
"""
layer_name = layer_data['name']
# Instantiate layer.
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
layer = deserialize_layer(layer_data, custom_objects=custom_objects)
created_layers[layer_name] = layer
# Gather layer inputs and convert to `ListWrapper` objects.
inbound_nodes_data = layer_data['inbound_nodes']
inbound_nodes_data = tf_utils.convert_inner_node_data(
inbound_nodes_data, wrap=True)
for node_data in inbound_nodes_data:
# We don't process nodes (i.e. make layer calls)
# on the fly because the inbound node may not yet exist,
# in case of layer shared at different topological depths
# (e.g. a model such as A(B(A(B(x)))))
add_unprocessed_node(layer, node_data)
# First, we create all layers and enqueue nodes to be processed
for layer_data in config['layers']:
process_layer(layer_data)
# Then we process nodes in order of layer depth.
# Nodes that cannot yet be processed (if the inbound node
# does not yet exist) are re-enqueued, and the process
# is repeated until all nodes are processed.
while unprocessed_nodes:
for layer_data in config['layers']:
layer = created_layers[layer_data['name']]
if layer in unprocessed_nodes:
for node_data in unprocessed_nodes.pop(layer):
process_node(layer, node_data)
name = config.get('name')
input_tensors = []
output_tensors = []
input_layers = tf_utils.convert_inner_node_data(
config['input_layers'], wrap=True)
for layer_data in nest.flatten(input_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
input_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
output_layers = tf_utils.convert_inner_node_data(
config['output_layers'], wrap=True)
for layer_data in nest.flatten(output_layers):
layer_name, node_index, tensor_index = layer_data.as_list()
assert layer_name in created_layers
layer = created_layers[layer_name]
layer_output_tensors = layer._inbound_nodes[node_index].output_tensors
output_tensors.append(nest.flatten(layer_output_tensors)[tensor_index])
input_tensors = nest.pack_sequence_as(input_layers, input_tensors)
output_tensors = nest.pack_sequence_as(output_layers, output_tensors)
model = cls(inputs=input_tensors, outputs=output_tensors, name=name)
# Layers not connected to outputs, such as those added in `add_loss`.
ancillary_layers = [
layer for layer in created_layers.values() if layer not in model.layers
]
if ancillary_layers:
model._insert_layers(ancillary_layers)
return model
def save(self,
filepath,
overwrite=True,
include_optimizer=True,
save_format=None):
"""Saves the model to Tensorflow SavedModel or a single HDF5 file.
The savefile includes:
- The model architecture, allowing to re-instantiate the model.
- The model weights.
- The state of the optimizer, allowing to resume training
exactly where you left off.
This allows you to save the entirety of the state of a model
in a single file.
Saved models can be reinstantiated via `keras.models.load_model`.
The model returned by `load_model`
is a compiled model ready to be used (unless the saved model
was never compiled in the first place).
Arguments:
filepath: String, path to SavedModel or H5 file to save the model.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
include_optimizer: If True, save optimizer's state together.
save_format: Either 'tf' or 'h5', indicating whether to save the model
to Tensorflow SavedModel or HDF5. The default is currently 'h5', but
will switch to 'tf' in TensorFlow 2.0. The 'tf' option is currently
disabled (use `tf.keras.experimental.export_saved_model` instead).
Example:
```python
from keras.models import load_model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
# returns a compiled model
# identical to the previous one
model = load_model('my_model.h5')
```
"""
saving.save_model(self, filepath, overwrite, include_optimizer, save_format)
def save_weights(self, filepath, overwrite=True, save_format=None):
"""Saves all layer weights.
Either saves in HDF5 or in TensorFlow format based on the `save_format`
argument.
When saving in HDF5 format, the weight file has:
- `layer_names` (attribute), a list of strings
(ordered names of model layers).
- For every layer, a `group` named `layer.name`
- For every such layer group, a group attribute `weight_names`,
a list of strings
(ordered names of weights tensor of the layer).
- For every weight in the layer, a dataset
storing the weight value, named after the weight tensor.
When saving in TensorFlow format, all objects referenced by the network are
saved in the same format as `tf.train.Checkpoint`, including any `Layer`
instances or `Optimizer` instances assigned to object attributes. For
networks constructed from inputs and outputs using `tf.keras.Model(inputs,
outputs)`, `Layer` instances used by the network are tracked/saved
automatically. For user-defined classes which inherit from `tf.keras.Model`,
`Layer` instances must be assigned to object attributes, typically in the
constructor. See the documentation of `tf.train.Checkpoint` and
`tf.keras.Model` for details.
Arguments:
filepath: String, path to the file to save the weights to. When saving
in TensorFlow format, this is the prefix used for checkpoint files
(multiple files are generated). Note that the '.h5' suffix causes
weights to be saved in HDF5 format.
overwrite: Whether to silently overwrite any existing file at the
target location, or provide the user with a manual prompt.
save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
'.keras' will default to HDF5 if `save_format` is `None`. Otherwise
`None` defaults to 'tf'.
Raises:
ImportError: If h5py is not available when attempting to save in HDF5
format.
ValueError: For invalid/unknown format arguments.
"""
self._assert_weights_created()
filepath_is_h5 = _is_hdf5_filepath(filepath)
if save_format is None:
if filepath_is_h5:
save_format = 'h5'
else:
save_format = 'tf'
else:
user_format = save_format.lower().strip()
if user_format in ('tensorflow', 'tf'):
save_format = 'tf'
elif user_format in ('hdf5', 'h5', 'keras'):
save_format = 'h5'
else:
raise ValueError(
'Unknown format "%s". Was expecting one of {"tf", "h5"}.' % (
save_format,))
if save_format == 'tf' and filepath_is_h5:
raise ValueError(
('save_weights got save_format="tf"/"tensorflow", but the '
'filepath ("%s") looks like an HDF5 file. Omit the ".h5"/".keras" '
'when saving in TensorFlow format.')
% filepath)
if save_format == 'h5' and h5py is None:
raise ImportError(
'`save_weights` requires h5py when saving in hdf5.')
if save_format == 'tf':
check_filepath = filepath + '.index'
else:
check_filepath = filepath
# If file exists and should not be overwritten:
if not overwrite and os.path.isfile(check_filepath):
proceed = ask_to_proceed_with_overwrite(check_filepath)
if not proceed:
return
if save_format == 'h5':
with h5py.File(filepath, 'w') as f:
saving.save_weights_to_hdf5_group(f, self.layers)
else:
if context.executing_eagerly():
session = None
else:
session = backend.get_session()
optimizer = getattr(self, 'optimizer', None)
if (optimizer
and not isinstance(optimizer, trackable.Trackable)):
logging.warning(
('This model was compiled with a Keras optimizer (%s) but is being '
'saved in TensorFlow format with `save_weights`. The model\'s '
'weights will be saved, but unlike with TensorFlow optimizers in '
'the TensorFlow format the optimizer\'s state will not be '
'saved.\n\nConsider using a TensorFlow optimizer from `tf.train`.')
% (optimizer,))
self._trackable_saver.save(filepath, session=session)
# Record this checkpoint so it's visible from tf.train.latest_checkpoint.
checkpoint_management.update_checkpoint_state_internal(
save_dir=os.path.dirname(filepath),
model_checkpoint_path=filepath,
save_relative_paths=True,
all_model_checkpoint_paths=[filepath])
def load_weights(self, filepath, by_name=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Arguments:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
"""
if _is_hdf5_filepath(filepath):
save_format = 'h5'
else:
try:
pywrap_tensorflow.NewCheckpointReader(filepath)
save_format = 'tf'
except errors_impl.DataLossError:
# The checkpoint is not readable in TensorFlow format. Try HDF5.
save_format = 'h5'
if save_format == 'tf':
status = self._trackable_saver.restore(filepath)
if by_name:
raise NotImplementedError(
'Weights may only be loaded based on topology into Models when '
'loading TensorFlow-formatted weights (got by_name=True to '
'load_weights).')
if not context.executing_eagerly():
session = backend.get_session()
# Restore existing variables (if any) immediately, and set up a
# streaming restore for any variables created in the future.
trackable_utils.streaming_restore(status=status, session=session)
status.assert_nontrivial_match()
return status
if h5py is None:
raise ImportError(
'`load_weights` requires h5py when loading weights from HDF5.')
if self._is_graph_network and not self.built:
raise NotImplementedError(
'Unable to load weights saved in HDF5 format into a subclassed '
'Model which has not created its variables yet. Call the Model '
'first, then load the weights.')
self._assert_weights_created()
with h5py.File(filepath, 'r') as f:
if 'layer_names' not in f.attrs and 'model_weights' in f:
f = f['model_weights']
if by_name:
saving.load_weights_from_hdf5_group_by_name(f, self.layers)
else:
saving.load_weights_from_hdf5_group(f, self.layers)
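# Illustrative sketch of the `by_name` path documented above (model and file
# names are placeholders): reuse weights between two models that share layer names.
#
#   base_model.save_weights('base.h5')
#   new_model.load_weights('base.h5', by_name=True)   # only layers with matching names are loaded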
def _updated_config(self):
"""Util shared between different serialization methods.
Returns:
Model config with Keras version information added.
"""
from tensorflow.python.keras import __version__ as keras_version # pylint: disable=g-import-not-at-top
config = self.get_config()
model_config = {
'class_name': self.__class__.__name__,
'config': config,
'keras_version': keras_version,
'backend': backend.backend()
}
return model_config
def to_json(self, **kwargs):
"""Returns a JSON string containing the network configuration.
To load a network from a JSON save file, use
`keras.models.model_from_json(json_string, custom_objects={})`.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `json.dumps()`.
Returns:
A JSON string.
"""
model_config = self._updated_config()
return json.dumps(
model_config, default=serialization.get_json_type, **kwargs)
def to_yaml(self, **kwargs):
"""Returns a yaml string containing the network configuration.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of custom losses / layers / etc to the corresponding
functions / classes.
Arguments:
**kwargs: Additional keyword arguments
to be passed to `yaml.dump()`.
Returns:
A YAML string.
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError(
'Requires yaml module installed (`pip install pyyaml`).')
return yaml.dump(self._updated_config(), **kwargs)
def summary(self, line_length=None, positions=None, print_fn=None):
"""Prints a string summary of the network.
Arguments:
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements
in each line. If not provided,
defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use. Defaults to `print`.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
Raises:
ValueError: if `summary()` is called before the model is built.
"""
if not self.built:
raise ValueError('This model has not yet been built. '
'Build the model first by calling `build()` or calling '
'`fit()` with some data, or specify '
'an `input_shape` argument in the first layer(s) for '
'automatic build.')
layer_utils.print_summary(self,
line_length=line_length,
positions=positions,
print_fn=print_fn)
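# Illustrative sketch: `print_fn` is called once per summary line, so the
# summary can be captured as a string instead of printed.
#
#   lines = []
#   model.summary(print_fn=lines.append)
#   summary_text = '\n'.join(lines)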
def _validate_graph_inputs_and_outputs(self):
"""Validates the inputs and outputs of a Graph Network."""
# Check for redundancy in inputs.
if len(set(self.inputs)) != len(self.inputs):
raise ValueError('The list of inputs passed to the model '
'is redundant. '
'All inputs should only appear once.'
' Found: ' + str(self.inputs))
for x in self.inputs:
# Check that x has appropriate `_keras_history` metadata.
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Input tensors to a ' + cls_name + ' ' +
'must come from `tf.keras.Input`. '
'Received: ' + str(x) +
' (missing previous layer metadata).')
# Check that x is an input tensor.
# pylint: disable=protected-access
layer, _, _ = x._keras_history
if len(layer._inbound_nodes) > 1 or (
layer._inbound_nodes and layer._inbound_nodes[0].inbound_layers):
cls_name = self.__class__.__name__
logging.warning(cls_name + ' inputs must come from '
'`tf.keras.Input` (thus holding past layer metadata), '
'they cannot be the output of '
'a previous non-Input layer. '
'Here, a tensor specified as '
'input to "' + self.name + '" was not an Input tensor, '
'it was generated by layer ' + layer.name + '.\n'
'Note that input tensors are '
'instantiated via `tensor = tf.keras.Input(shape)`.\n'
'The tensor that caused the issue was: ' + str(x.name))
# Check compatibility of batch sizes of Input Layers.
input_batch_sizes = [
training_utils.get_static_batch_size(x._keras_history[0])
for x in self.inputs
]
consistent_batch_size = None
for batch_size in input_batch_sizes:
if batch_size is not None:
if (consistent_batch_size is not None and
batch_size != consistent_batch_size):
raise ValueError('The specified batch sizes of the Input Layers'
' are incompatible. Found batch sizes: {}'.format(
input_batch_sizes))
consistent_batch_size = batch_size
for x in self.outputs:
if not hasattr(x, '_keras_history'):
cls_name = self.__class__.__name__
raise ValueError('Output tensors to a ' + cls_name + ' must be '
'the output of a TensorFlow `Layer` '
'(thus holding past layer metadata). Found: ' + str(x))
def _insert_layers(self, layers, relevant_nodes=None):
"""Inserts Layers into the Network after Network creation.
This is only valid for Keras Graph Networks. Layers added via this function
will be included in the `call` computation and `get_config` of this Network.
They will not be added to the Network's outputs.
Arguments:
layers: Arbitrary nested structure of Layers. Layers must be reachable
from one or more of the `keras.Input` Tensors that correspond to this
Network's inputs.
relevant_nodes: Nodes from the Layers that should be considered part of
this Network. If `None`, all Nodes will be considered part of this
Network.
Raises:
ValueError: If the layers depend on `Input`s not found in this Model.
"""
layers = nest.flatten(layers)
tf_utils.assert_no_legacy_layers(layers)
node_to_depth = {}
for depth, nodes in self._nodes_by_depth.items():
node_to_depth.update({node: depth for node in nodes})
# The nodes of these Layers that are relevant to this Network. If not
# provided, assume all Nodes are relevant
if not relevant_nodes:
relevant_nodes = nest.flatten([layer._inbound_nodes for layer in layers])
network_nodes = set(relevant_nodes + list(node_to_depth.keys()))
def _get_min_depth(node):
"""Gets the minimum depth at which node can be computed."""
min_depth = 0
for layer, node_id, _, _ in node.iterate_inbound():
inbound_node = layer._inbound_nodes[node_id]
if inbound_node in node_to_depth:
min_depth = min(min_depth, node_to_depth[inbound_node])
elif inbound_node not in network_nodes:
continue
else:
# Previous relevant nodes haven't been processed yet.
return None
# New node is one shallower than its shallowest input.
return min_depth - 1
# Insert nodes into `_nodes_by_depth` and other node attrs.
unprocessed_nodes = copy.copy(relevant_nodes)
i = 0
while unprocessed_nodes:
i += 1
# Do a sanity check. This can occur if `Input`s from outside this Model
# are being relied on.
if i > 10000:
raise ValueError('Layers could not be added due to missing '
'dependencies.')
node = unprocessed_nodes.pop(0)
depth = _get_min_depth(node)
if depth is None:
unprocessed_nodes.append(node)
else:
node_key = _make_node_key(
node.outbound_layer.name,
node.outbound_layer._inbound_nodes.index(node))
node_to_depth[node] = depth
self._network_nodes.add(node_key)
self._nodes_by_depth[depth].append(node)
# Insert layers into `_layer_by_depth` and other layer attrs.
for layer in layers:
depth = min([
node_to_depth[node]
for node in layer.inbound_nodes
if node in network_nodes
])
self._layers_by_depth[depth].append(layer)
self._layers.append(layer)
self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)
def _assert_weights_created(self):
"""Asserts that all the weights for the network have been created.
For a non-dynamic network, the weights must already be created after the
layer has been called. For a dynamic network, the exact list of weights can
never be known for certain since it may change at any time during execution.
We run this check right before accessing weights or getting the Numpy value
for the current weights. Otherwise, if the layer has never been called,
the user would just get an empty list, which is misleading.
Raises:
ValueError: if the weights of the network have not yet been created.
"""
if self.dynamic:
return
if (not self._is_graph_network and
'build' in self.__class__.__dict__ and
not self.built):
# For any model that has customized build() method but hasn't
# been invoked yet, this will cover both sequential and subclass model.
raise ValueError('Weights for model %s have not yet been created. '
'Weights are created when the Model is first called on '
'inputs or `build()` is called with an `input_shape`.' %
self.name)
def _is_hdf5_filepath(filepath):
return (filepath.endswith('.h5') or filepath.endswith('.keras') or
filepath.endswith('.hdf5'))
def _make_node_key(layer_name, node_index):
return layer_name + '_ib-' + str(node_index)
def _map_graph_network(inputs, outputs):
"""Validates a network's topology and gather its layers and nodes.
Arguments:
inputs: List of input tensors.
outputs: List of outputs tensors.
Returns:
A tuple `(nodes, nodes_by_depth, layers, layers_by_depth)`.
- nodes: list of Node instances.
- nodes_by_depth: dict mapping ints (depth) to lists of node instances.
- layers: list of Layer instances.
- layers_by_depth: dict mapping ints (depth) to lists of layer instances.
Raises:
ValueError: In case the network is not valid (e.g. disconnected graph).
"""
# Network_nodes: set of nodes included in the graph of layers
# (not all nodes included in the layers are relevant to the current graph).
network_nodes = set() # ids of all nodes relevant to the Network
nodes_depths = {} # dict {node: depth value}
layers_depths = {} # dict {layer: depth value}
layer_indices = {} # dict {layer: index in traversal}
nodes_in_decreasing_depth = []
def build_map(tensor,
finished_nodes,
nodes_in_progress,
layer,
node_index,
tensor_index):
"""Builds a map of the graph of layers.
This recursively updates the map `layer_indices`,
the list `nodes_in_decreasing_depth` and the set `network_nodes`.
Arguments:
tensor: Some tensor in a graph.
finished_nodes: Set of nodes whose subgraphs have been traversed
completely. Useful to prevent duplicated work.
nodes_in_progress: Set of nodes that are currently active on the
recursion stack. Useful to detect cycles.
layer: Layer from which `tensor` comes. If not provided,
it will be obtained from `tensor._keras_history`.
node_index: Node index from which `tensor` comes.
tensor_index: Tensor index from which `tensor` comes.
Raises:
ValueError: if a cycle is detected.
"""
node = layer._inbound_nodes[node_index] # pylint: disable=protected-access
# Prevent cycles.
if node in nodes_in_progress:
raise ValueError('The tensor ' + str(tensor) + ' at layer "' +
layer.name + '" is part of a cycle.')
# Don't repeat work for shared subgraphs
if node in finished_nodes:
return
node_key = _make_node_key(layer.name, node_index)
# Update network_nodes.
network_nodes.add(node_key)
# Store the traversal order for layer sorting.
if layer not in layer_indices:
layer_indices[layer] = len(layer_indices)
nodes_in_progress.add(node)
# Propagate to all previous tensors connected to this node.
for layer, node_index, tensor_index, tensor in node.iterate_inbound():
build_map(tensor, finished_nodes, nodes_in_progress, layer, node_index,
tensor_index)
finished_nodes.add(node)
nodes_in_progress.remove(node)
nodes_in_decreasing_depth.append(node)
finished_nodes = set()
nodes_in_progress = set()
for x in outputs:
layer, node_index, tensor_index = x._keras_history # pylint: disable=protected-access
build_map(x, finished_nodes, nodes_in_progress,
layer=layer,
node_index=node_index,
tensor_index=tensor_index)
for node in reversed(nodes_in_decreasing_depth):
# If the depth is not set, the node has no outbound nodes (depth 0).
depth = nodes_depths.setdefault(node, 0)
# Update the depth of the corresponding layer
previous_depth = layers_depths.get(node.outbound_layer, 0)
# If we've seen this layer before at a higher depth,
# we should use that depth instead of the node depth.
# This is necessary for shared layers that have inputs at different
# depth levels in the graph.
depth = max(depth, previous_depth)
layers_depths[node.outbound_layer] = depth
nodes_depths[node] = depth
# Update the depth of inbound nodes.
# The "depth" of a node is the max of the depths
# of all layers it is connected to.
for inbound_layer, node_index, _, _ in node.iterate_inbound():
inbound_node = inbound_layer._inbound_nodes[node_index] # pylint: disable=protected-access
previous_depth = nodes_depths.get(inbound_node, 0)
nodes_depths[inbound_node] = max(depth + 1, previous_depth)
# Handle inputs that are not connected to outputs.
for input_t in inputs:
input_layer = input_t._keras_history[0]
if input_layer not in layers_depths:
layers_depths[input_layer] = 0
layer_indices[input_layer] = -1
nodes_depths[input_layer._inbound_nodes[0]] = 0
# Build a dict {depth: list of nodes with this depth}
nodes_by_depth = collections.defaultdict(list)
for node, depth in nodes_depths.items():
nodes_by_depth[depth].append(node)
# Build a dict {depth: list of layers with this depth}
layers_by_depth = collections.defaultdict(list)
for layer, depth in layers_depths.items():
layers_by_depth[depth].append(layer)
# Get sorted list of layer depths.
depth_keys = list(layers_by_depth.keys())
depth_keys.sort(reverse=True)
# Set self.layers and self._layers_by_depth.
layers = []
for depth in depth_keys:
layers_for_depth = layers_by_depth[depth]
# Network.layers needs to have a deterministic order:
# here we order them by traversal order.
layers_for_depth.sort(key=lambda x: layer_indices[x])
layers.extend(layers_for_depth)
# Get sorted list of node depths.
depth_keys = list(nodes_by_depth.keys())
depth_keys.sort(reverse=True)
# Check that all tensors required are computable.
# computable_tensors: all tensors in the graph
# that can be computed from the inputs provided.
computable_tensors = []
for x in inputs:
computable_tensors.append(x)
layers_with_complete_input = [] # To provide a better error msg.
for depth in depth_keys:
for node in nodes_by_depth[depth]:
layer = node.outbound_layer
if layer:
for x in nest.flatten(node.input_tensors):
if x not in computable_tensors:
raise ValueError('Graph disconnected: '
'cannot obtain value for tensor ' + str(x) +
' at layer "' + layer.name + '". '
'The following previous layers '
'were accessed without issue: ' +
str(layers_with_complete_input))
for x in nest.flatten(node.output_tensors):
computable_tensors.append(x)
layers_with_complete_input.append(layer.name)
# Ensure name unicity, which will be crucial for serialization
# (since serialized nodes refer to layers by their name).
all_names = [layer.name for layer in layers]
for name in all_names:
if all_names.count(name) != 1:
raise ValueError('The name "' + name + '" is used ' +
str(all_names.count(name)) + ' times in the model. '
'All layer names should be unique.')
return network_nodes, nodes_by_depth, layers, layers_by_depth
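# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): it exercises the graph-network
# machinery above through the public tf.keras API; the layer names and the
# /tmp path are arbitrary placeholders.
if __name__ == '__main__':
    import tensorflow as tf

    demo_inputs = tf.keras.Input(shape=(16,), name='features')
    hidden = tf.keras.layers.Dense(8, activation='relu', name='hidden')(demo_inputs)
    score = tf.keras.layers.Dense(1, name='score')(hidden)
    demo_model = tf.keras.Model(inputs=demo_inputs, outputs=score)

    # get_config()/from_config() round-trip the serialized topology built above.
    clone = tf.keras.Model.from_config(demo_model.get_config())

    # A '.h5' suffix selects the HDF5 path in save_weights()/load_weights().
    demo_model.save_weights('/tmp/network_demo.h5')
    clone.load_weights('/tmp/network_demo.h5')
    clone.summary()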
|
py
|
1a5a100ebb137c8359bb9b44bd0bcf8ae691e401
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import inspect
import logging
import requests
import xmltodict
from xml.parsers.expat import ExpatError
from optionaldict import optionaldict
from wechatpy.utils import random_string
from wechatpy.exceptions import WeChatPayException, InvalidSignatureException
from wechatpy.pay.utils import (
calculate_signature, calculate_signature_hmac, _check_signature, dict_to_xml
)
from wechatpy.pay.base import BaseWeChatPayAPI
from wechatpy.pay import api
logger = logging.getLogger(__name__)
def _is_api_endpoint(obj):
return isinstance(obj, BaseWeChatPayAPI)
class WeChatPay(object):
"""
WeChat Pay API client
:param appid: appid of the WeChat official account
:param sub_appid: APPID of the mini program that initiates the payment
:param api_key: merchant API key (do not use the mini program secret here)
:param mch_id: merchant id
:param sub_mch_id: optional, sub-merchant id, required in service-provider mode
:param mch_cert: required, path to the merchant certificate file
:param mch_key: required, path to the merchant certificate private key file
:param timeout: optional, request timeout in seconds, no timeout by default
:param sandbox: optional, whether to use the sandbox environment, defaults to False
"""
redpack = api.WeChatRedpack()
"""红包接口"""
transfer = api.WeChatTransfer()
"""企业付款接口"""
coupon = api.WeChatCoupon()
"""代金券接口"""
order = api.WeChatOrder()
"""订单接口"""
refund = api.WeChatRefund()
"""退款接口"""
micropay = api.WeChatMicroPay()
"""刷卡支付接口"""
tools = api.WeChatTools()
"""工具类接口"""
jsapi = api.WeChatJSAPI()
"""公众号网页 JS 支付接口"""
withhold = api.WeChatWithhold()
"""代扣接口"""
API_BASE_URL = 'https://api.mch.weixin.qq.com/'
def __new__(cls, *args, **kwargs):
self = super(WeChatPay, cls).__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, _api in api_endpoints:
api_cls = type(_api)
_api = api_cls(self)
setattr(self, name, _api)
return self
def __init__(self, appid, api_key, mch_id, sub_mch_id=None,
mch_cert=None, mch_key=None, timeout=None, sandbox=False, sub_appid=None):
self.appid = appid
self.sub_appid = sub_appid
self.api_key = api_key
self.mch_id = mch_id
self.sub_mch_id = sub_mch_id
self.mch_cert = mch_cert
self.mch_key = mch_key
self.timeout = timeout
self.sandbox = sandbox
self._sandbox_api_key = None
self._http = requests.Session()
def _fetch_sandbox_api_key(self):
nonce_str = random_string(32)
sign = calculate_signature({'mch_id': self.mch_id, 'nonce_str': nonce_str}, self.api_key)
payload = dict_to_xml({
'mch_id': self.mch_id,
'nonce_str': nonce_str,
}, sign=sign)
headers = {'Content-Type': 'text/xml'}
api_url = '{base}sandboxnew/pay/getsignkey'.format(base=self.API_BASE_URL)
response = self._http.post(api_url, data=payload, headers=headers)
return xmltodict.parse(response.text)['xml'].get('sandbox_signkey')
def _request(self, method, url_or_endpoint, **kwargs):
if not url_or_endpoint.startswith(('http://', 'https://')):
api_base_url = kwargs.pop('api_base_url', self.API_BASE_URL)
if self.sandbox:
api_base_url = '{url}sandboxnew/'.format(url=api_base_url)
url = '{base}{endpoint}'.format(
base=api_base_url,
endpoint=url_or_endpoint
)
else:
url = url_or_endpoint
if isinstance(kwargs.get('data', ''), dict):
data = kwargs['data']
if 'mchid' not in data:
# some endpoints use 'mchid' instead of 'mch_id'; only apply the default when 'mchid' is absent
data.setdefault('mch_id', self.mch_id)
data.setdefault('sub_mch_id', self.sub_mch_id)
data.setdefault('nonce_str', random_string(32))
data = optionaldict(data)
if data.get('sign_type', 'MD5') == 'HMAC-SHA256':
sign = calculate_signature_hmac(data, self.sandbox_api_key if self.sandbox else self.api_key)
else:
sign = calculate_signature(data, self.sandbox_api_key if self.sandbox else self.api_key)
body = dict_to_xml(data, sign)
body = body.encode('utf-8')
kwargs['data'] = body
# merchant certificate (cert and private key)
if self.mch_cert and self.mch_key:
kwargs['cert'] = (self.mch_cert, self.mch_key)
kwargs['timeout'] = kwargs.get('timeout', self.timeout)
logger.debug('Request to WeChat API: %s %s\n%s', method, url, kwargs)
res = self._http.request(
method=method,
url=url,
**kwargs
)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatPayException(
return_code=None,
client=self,
request=reqe.request,
response=reqe.response
)
return self._handle_result(res)
def _handle_result(self, res):
res.encoding = 'utf-8'
xml = res.text
logger.debug('Response from WeChat API \n %s', xml)
try:
data = xmltodict.parse(xml)['xml']
except (xmltodict.ParsingInterrupted, ExpatError):
# failed to parse the XML response
logger.debug('WeChat payment result xml parsing error', exc_info=True)
return xml
return_code = data['return_code']
return_msg = data.get('return_msg')
result_code = data.get('result_code')
errcode = data.get('err_code')
errmsg = data.get('err_code_des')
if return_code != 'SUCCESS' or result_code != 'SUCCESS':
# the returned status code is not SUCCESS
raise WeChatPayException(
return_code,
result_code,
return_msg,
errcode,
errmsg,
client=self,
request=res.request,
response=res
)
return data
def get(self, url, **kwargs):
return self._request(
method='get',
url_or_endpoint=url,
**kwargs
)
def post(self, url, **kwargs):
return self._request(
method='post',
url_or_endpoint=url,
**kwargs
)
def check_signature(self, params):
return _check_signature(params, self.api_key if not self.sandbox else self.sandbox_api_key)
def parse_payment_result(self, xml):
"""解析微信支付结果通知"""
try:
data = xmltodict.parse(xml)
except (xmltodict.ParsingInterrupted, ExpatError):
raise InvalidSignatureException()
if not data or 'xml' not in data:
raise InvalidSignatureException()
data = data['xml']
sign = data.pop('sign', None)
real_sign = calculate_signature(data, self.api_key if not self.sandbox else self.sandbox_api_key)
if sign != real_sign:
raise InvalidSignatureException()
for key in ('total_fee', 'settlement_total_fee', 'cash_fee', 'coupon_fee', 'coupon_count'):
if key in data:
data[key] = int(data[key])
data['sign'] = sign
return data
@property
def sandbox_api_key(self):
if self.sandbox and self._sandbox_api_key is None:
self._sandbox_api_key = self._fetch_sandbox_api_key()
return self._sandbox_api_key
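# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; all credential values are
# placeholders): verify and decode a payment notification with the client
# defined above.
if __name__ == '__main__':
    pay = WeChatPay(
        appid='wx0123456789abcdef',        # placeholder appid
        api_key='replace-with-merchant-key',
        mch_id='1230000109',               # placeholder merchant id
    )
    # `notify_body` stands in for the raw XML WeChat posts to your notify_url;
    # a real notification body is needed for the signature check to pass.
    notify_body = '<xml><return_code>SUCCESS</return_code></xml>'
    try:
        result = pay.parse_payment_result(notify_body)
        print(result.get('out_trade_no'), result.get('total_fee'))
    except InvalidSignatureException:
        print('Signature check failed; ignoring this notification')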
|
py
|
1a5a10258a0b82a7238a1afc4221070847943519
|
# Assumes `conn` is an open MySQLdb (mysqlclient) connection and `csr1` is a
# cursor created from it, e.g. csr1 = conn.cursor().
import MySQLdb

try:
    conn.autocommit(False)  # run both UPDATEs inside a single transaction
    csr1.execute("UPDATE account_balance "
                 "SET balance=balance-%s "
                 "WHERE account_id=%s",
                 [tfer_amount, from_account])
    csr1.execute("UPDATE account_balance "
                 "SET balance=balance+%s "
                 "WHERE account_id=%s",
                 [tfer_amount, to_account])
except MySQLdb.Error as e:
    conn.rollback()
    print("Transaction aborted: %d: %s" % (e.args[0], e.args[1]))
else:
    conn.commit()
    print("Transaction succeeded")
|
py
|
1a5a10560d63b79a8e8352accb0df8b9f5fae26e
|
# Copyright 2020, 2021 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
# Illustrate the use of sequential sampling for programmers using aircond.
#
import sys
import numpy as np
import argparse
import mpisppy.tests.examples.aircond as aircond
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
import mpisppy.utils.amalgomator as amalgomator
import mpisppy.confidence_intervals.multi_seqsampling as multi_seqsampling
import mpisppy.confidence_intervals.confidence_parsers as confidence_parsers
from mpisppy.utils import baseparsers
#============================
def xhat_generator_aircond(scenario_names, solvername="gurobi", solver_options=None,
branching_factors=None, mudev = 0, sigmadev = 40,
start_ups=None, start_seed = 0):
'''
For sequential sampling.
Takes scenario names as input and provide the best solution for the
approximate problem associated with the scenarios.
Parameters
----------
scenario_names: list of str
Names of the scenario we use
solvername: str, optional
Name of the solver used. The default is "gurobi"
solver_options: dict, optional
Solving options. The default is None.
branching_factors: list, optional
Branching factors of the scenario tree. The default is [3,2,3]
(a 4 stage model with 18 different scenarios)
mudev: float, optional
The average deviation of demand between two stages. The default is 0.
sigmadev: float, optional
The standard deviation from mudev for the demand difference between
two stages. The default is 40.
start_seed: int, optional
The starting seed, used to create different sample scenario trees.
The default is 0.
Returns
-------
xhat: dict
A generated xhat, solution to the approximate problem induced by scenario_names.
NOTE: This tool only works when the file is in mpisppy. In SPInstances,
you must change the from_module line.
'''
num_scens = len(scenario_names)
ama_options = { "EF-mstage": True,
"EF_solver_name": solvername,
"EF_solver_options": solver_options,
"num_scens": num_scens,
"_mpisppy_probability": 1/num_scens,
"branching_factors":branching_factors,
"mudev":mudev,
"start_ups":start_ups,
"start_seed":start_seed,
"sigmadev":sigmadev
}
#We use from_module to build easily an Amalgomator object
ama = amalgomator.from_module("mpisppy.tests.examples.aircond",
ama_options,use_command_line=False)
#Correcting the building by putting the right scenarios.
ama.scenario_names = scenario_names
ama.verbose = False
ama.run()
# get the xhat
xhat = sputils.nonant_cache_from_ef(ama.ef)
return {'ROOT': xhat['ROOT']}
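# Example call (illustrative; the solver name and branching factors below are
# placeholders, not recommendations):
#
#   names = ["Scenario" + str(i) for i in range(18)]
#   xhat = xhat_generator_aircond(names, solvername="cplex",
#                                 branching_factors=[3, 2, 3],
#                                 mudev=0, sigmadev=40, start_seed=0)
#   print(xhat["ROOT"])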
def main(args):
""" Code for aircond sequential sampling (in a function for easier testing)
Args:
args (parseargs): the command line arguments object from parseargs
Returns:
results (dict): the solution, gap confidence interval and T
"""
refmodelname = "mpisppy.tests.examples.aircond"
scenario_creator = aircond.scenario_creator
BFs = args.branching_factors
num_scens = np.prod(BFs)
solver_name = args.solver_name
mudev = args.mudev
sigmadev = args.sigmadev
scenario_creator_kwargs = {"num_scens" : num_scens,
"branching_factors": BFs,
"mudev": mudev,
"sigmadev": sigmadev,
"start_ups": False,
"start_seed": args.seed,
}
scenario_names = ['Scenario' + str(i) for i in range(num_scens)]
xhat_gen_options = {"scenario_names": scenario_names,
"solvername": solver_name,
"solver_options": None,
"branching_factors" : BFs,
"mudev": mudev,
"sigmadev": sigmadev,
"start_ups": False,
"start_seed": args.seed,
}
# simply called "options" by the SeqSampling constructor
inneroptions = {"solvername": solver_name,
"branching_factors": BFs,
"solver_options": None,
"sample_size_ratio": args.sample_size_ratio,
"xhat_gen_options": xhat_gen_options,
"ArRP": args.ArRP,
"kf_xhat": args.kf_GS,
"kf_xhat": args.kf_xhat,
"confidence_level": args.confidence_level,
"start_ups": False,
}
if args.BM_vs_BPL == "BM":
# Bayraksan and Morton
optionsBM = {'h': args.BM_h,
'hprime': args.BM_hprime,
'eps': args.BM_eps,
'epsprime': args.BM_eps_prime,
"p": args.BM_p,
"q": args.BM_q,
"xhat_gen_options": xhat_gen_options,
}
optionsBM.update(inneroptions)
sampler = multi_seqsampling.IndepScens_SeqSampling(refmodelname,
xhat_generator_aircond,
optionsBM,
stochastic_sampling=False,
stopping_criterion="BM",
solving_type="EF-mstage",
)
else: # must be BPL
optionsBPL = {'eps': args.BPL_eps,
"c0": args.BPL_c0,
"n0min": args.BPL_n0min,
"xhat_gen_options": xhat_gen_options,
}
optionsBPL.update(inneroptions)
ss = int(args.BPL_n0min) != 0
sampler = multi_seqsampling.IndepScens_SeqSampling(refmodelname,
xhat_generator_aircond,
optionsBPL,
stochastic_sampling=ss,
stopping_criterion="BPL",
solving_type="EF-mstage",
)
xhat = sampler.run()
return xhat
def _parse_args():
parser = baseparsers._basic_multistage("aircond_seqsampling")
parser = confidence_parsers.confidence_parser(parser)
parser = confidence_parsers.sequential_parser(parser)
parser = confidence_parsers.BM_parser(parser)
parser = confidence_parsers.BPL_parser(parser) # --help will show both BM and BPL
parser = aircond.inparser_adder(parser)
parser.add_argument("--solver-name",
help = "solver name (default gurobi)",
dest="solver_name",
type = str,
default="gurobi")
parser.add_argument("--seed",
help="Seed for random numbers (default is 1134)",
dest="seed",
type=int,
default=1134)
parser.add_argument("--BM-vs-BPL",
help="BM or BPL for Bayraksan and Morton or B and Pierre Louis",
dest="BM_vs_BPL",
type=str,
default=None)
parser.add_argument("--xhat1-file",
help="File to which xhat1 should be (e.g. to process with zhat4hat.py)",
dest="xhat1_file",
type=str,
default=None)
args = parser.parse_args()
if args.BM_vs_BPL is None:
raise argparse.ArgumentTypeError("--BM-vs_BPL must be given.")
if args.BM_vs_BPL != "BM" and args.BM_vs_BPL != "BPL":
raise argparse.ArgumentTypeError(f"--BM-vs_BPL must be BM or BPL (you gave {args.BM_vs_BMPL})")
return args
if __name__ == '__main__':
args = _parse_args()
results = main(args)
print(f"Final gap confidence interval results:", results)
if args.xhat1_file is not None:
print(f"Writing xhat1 to {args.xhat1_file}.npy")
root_nonants = np.fromiter((v for v in results["Candidate_solution"]["ROOT"]), float)
np.save(args.xhat1_file, root_nonants)
|
py
|
1a5a10d974766684599021ef24ee8b32cf0c0698
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import email.mime.multipart
import email.utils
import logging
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from six.moves import urllib
import bleach
import jinja2
from twisted.internet import defer
from twisted.mail.smtp import sendmail
from synapse.api.constants import EventTypes
from synapse.api.errors import StoreError
from synapse.push.presentable_names import (
calculate_room_name,
descriptor_from_member_events,
name_from_member_event,
)
from synapse.types import UserID
from synapse.util.async_helpers import concurrently_execute
from synapse.visibility import filter_events_for_client
logger = logging.getLogger(__name__)
MESSAGE_FROM_PERSON_IN_ROOM = "You have a message on %(app)s from %(person)s " \
"in the %(room)s room..."
MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..."
MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..."
MESSAGES_IN_ROOM = "You have messages on %(app)s in the %(room)s room..."
MESSAGES_IN_ROOM_AND_OTHERS = \
"You have messages on %(app)s in the %(room)s room and others..."
MESSAGES_FROM_PERSON_AND_OTHERS = \
"You have messages on %(app)s from %(person)s and others..."
INVITE_FROM_PERSON_TO_ROOM = "%(person)s has invited you to join the " \
"%(room)s room on %(app)s..."
INVITE_FROM_PERSON = "%(person)s has invited you to chat on %(app)s..."
CONTEXT_BEFORE = 1
CONTEXT_AFTER = 1
# From https://github.com/matrix-org/matrix-react-sdk/blob/master/src/HtmlUtils.js
ALLOWED_TAGS = [
'font', # custom to matrix for IRC-style font coloring
'del', # for markdown
# deliberately no h1/h2 to stop people shouting.
'h3', 'h4', 'h5', 'h6', 'blockquote', 'p', 'a', 'ul', 'ol',
'nl', 'li', 'b', 'i', 'u', 'strong', 'em', 'strike', 'code', 'hr', 'br', 'div',
'table', 'thead', 'caption', 'tbody', 'tr', 'th', 'td', 'pre'
]
ALLOWED_ATTRS = {
# custom ones first:
"font": ["color"], # custom to matrix
"a": ["href", "name", "target"], # remote target: custom to matrix
# We don't currently allow img itself by default, but this
# would make sense if we did
"img": ["src"],
}
# When bleach release a version with this option, we can specify schemes
# ALLOWED_SCHEMES = ["http", "https", "ftp", "mailto"]
class Mailer(object):
def __init__(self, hs, app_name, notif_template_html, notif_template_text):
self.hs = hs
self.notif_template_html = notif_template_html
self.notif_template_text = notif_template_text
self.store = self.hs.get_datastore()
self.macaroon_gen = self.hs.get_macaroon_generator()
self.state_handler = self.hs.get_state_handler()
self.app_name = app_name
logger.info("Created Mailer for app_name %s" % app_name)
@defer.inlineCallbacks
def send_notification_mail(self, app_id, user_id, email_address,
push_actions, reason):
try:
from_string = self.hs.config.email_notif_from % {
"app": self.app_name
}
except TypeError:
from_string = self.hs.config.email_notif_from
raw_from = email.utils.parseaddr(from_string)[1]
raw_to = email.utils.parseaddr(email_address)[1]
if raw_to == '':
raise RuntimeError("Invalid 'to' address")
rooms_in_order = deduped_ordered_list(
[pa['room_id'] for pa in push_actions]
)
notif_events = yield self.store.get_events(
[pa['event_id'] for pa in push_actions]
)
notifs_by_room = {}
for pa in push_actions:
notifs_by_room.setdefault(pa["room_id"], []).append(pa)
# collect the current state for all the rooms in which we have
# notifications
state_by_room = {}
try:
user_display_name = yield self.store.get_profile_displayname(
UserID.from_string(user_id).localpart
)
if user_display_name is None:
user_display_name = user_id
except StoreError:
user_display_name = user_id
@defer.inlineCallbacks
def _fetch_room_state(room_id):
room_state = yield self.store.get_current_state_ids(room_id)
state_by_room[room_id] = room_state
# Run at most 3 of these at once: sync does 10 at a time but email
# notifs are much less realtime than sync so we can afford to wait a bit.
yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)
# actually sort our so-called rooms_in_order list, most recent room first
rooms_in_order.sort(
key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0)
)
rooms = []
for r in rooms_in_order:
roomvars = yield self.get_room_vars(
r, user_id, notifs_by_room[r], notif_events, state_by_room[r]
)
rooms.append(roomvars)
reason['room_name'] = yield calculate_room_name(
self.store, state_by_room[reason['room_id']], user_id,
fallback_to_members=True
)
summary_text = yield self.make_summary_text(
notifs_by_room, state_by_room, notif_events, user_id, reason
)
template_vars = {
"user_display_name": user_display_name,
"unsubscribe_link": self.make_unsubscribe_link(
user_id, app_id, email_address
),
"summary_text": summary_text,
"app_name": self.app_name,
"rooms": rooms,
"reason": reason,
}
html_text = self.notif_template_html.render(**template_vars)
html_part = MIMEText(html_text, "html", "utf8")
plain_text = self.notif_template_text.render(**template_vars)
text_part = MIMEText(plain_text, "plain", "utf8")
multipart_msg = MIMEMultipart('alternative')
multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text)
multipart_msg['From'] = from_string
multipart_msg['To'] = email_address
multipart_msg['Date'] = email.utils.formatdate()
multipart_msg['Message-ID'] = email.utils.make_msgid()
multipart_msg.attach(text_part)
multipart_msg.attach(html_part)
logger.info("Sending email push notification to %s" % email_address)
# logger.debug(html_text)
yield sendmail(
self.hs.config.email_smtp_host,
raw_from, raw_to, multipart_msg.as_string(),
port=self.hs.config.email_smtp_port,
requireAuthentication=self.hs.config.email_smtp_user is not None,
username=self.hs.config.email_smtp_user,
password=self.hs.config.email_smtp_pass,
requireTransportSecurity=self.hs.config.require_transport_security
)
@defer.inlineCallbacks
def get_room_vars(self, room_id, user_id, notifs, notif_events, room_state_ids):
my_member_event_id = room_state_ids[("m.room.member", user_id)]
my_member_event = yield self.store.get_event(my_member_event_id)
is_invite = my_member_event.content["membership"] == "invite"
room_name = yield calculate_room_name(self.store, room_state_ids, user_id)
room_vars = {
"title": room_name,
"hash": string_ordinal_total(room_id), # See sender avatar hash
"notifs": [],
"invite": is_invite,
"link": self.make_room_link(room_id),
}
if not is_invite:
for n in notifs:
notifvars = yield self.get_notif_vars(
n, user_id, notif_events[n['event_id']], room_state_ids
)
# merge overlapping notifs together.
# relies on the notifs being in chronological order.
merge = False
if room_vars['notifs'] and 'messages' in room_vars['notifs'][-1]:
prev_messages = room_vars['notifs'][-1]['messages']
for message in notifvars['messages']:
pm = list(filter(lambda pm: pm['id'] == message['id'],
prev_messages))
if pm:
if not message["is_historical"]:
pm[0]["is_historical"] = False
merge = True
elif merge:
# we're merging, so append any remaining messages
# in this notif to the previous one
prev_messages.append(message)
if not merge:
room_vars['notifs'].append(notifvars)
defer.returnValue(room_vars)
@defer.inlineCallbacks
def get_notif_vars(self, notif, user_id, notif_event, room_state_ids):
results = yield self.store.get_events_around(
notif['room_id'], notif['event_id'],
before_limit=CONTEXT_BEFORE, after_limit=CONTEXT_AFTER
)
ret = {
"link": self.make_notif_link(notif),
"ts": notif['received_ts'],
"messages": [],
}
the_events = yield filter_events_for_client(
self.store, user_id, results["events_before"]
)
the_events.append(notif_event)
for event in the_events:
messagevars = yield self.get_message_vars(notif, event, room_state_ids)
if messagevars is not None:
ret['messages'].append(messagevars)
defer.returnValue(ret)
@defer.inlineCallbacks
def get_message_vars(self, notif, event, room_state_ids):
if event.type != EventTypes.Message:
return
sender_state_event_id = room_state_ids[("m.room.member", event.sender)]
sender_state_event = yield self.store.get_event(sender_state_event_id)
sender_name = name_from_member_event(sender_state_event)
sender_avatar_url = sender_state_event.content.get("avatar_url")
# 'hash' for deterministically picking default images: use
# sender_hash % the number of default images to choose from
sender_hash = string_ordinal_total(event.sender)
msgtype = event.content.get("msgtype")
ret = {
"msgtype": msgtype,
"is_historical": event.event_id != notif['event_id'],
"id": event.event_id,
"ts": event.origin_server_ts,
"sender_name": sender_name,
"sender_avatar_url": sender_avatar_url,
"sender_hash": sender_hash,
}
if msgtype == "m.text":
self.add_text_message_vars(ret, event)
elif msgtype == "m.image":
self.add_image_message_vars(ret, event)
if "body" in event.content:
ret["body_text_plain"] = event.content["body"]
defer.returnValue(ret)
def add_text_message_vars(self, messagevars, event):
msgformat = event.content.get("format")
messagevars["format"] = msgformat
formatted_body = event.content.get("formatted_body")
body = event.content.get("body")
if msgformat == "org.matrix.custom.html" and formatted_body:
messagevars["body_text_html"] = safe_markup(formatted_body)
elif body:
messagevars["body_text_html"] = safe_text(body)
return messagevars
def add_image_message_vars(self, messagevars, event):
messagevars["image_url"] = event.content["url"]
return messagevars
@defer.inlineCallbacks
def make_summary_text(self, notifs_by_room, room_state_ids,
notif_events, user_id, reason):
if len(notifs_by_room) == 1:
# Only one room has new stuff
room_id = notifs_by_room.keys()[0]
# If the room has some kind of name, use it, but we don't
# want the generated-from-names one here otherwise we'll
# end up with, "new message from Bob in the Bob room"
room_name = yield calculate_room_name(
self.store, room_state_ids[room_id], user_id, fallback_to_members=False
)
my_member_event_id = room_state_ids[room_id][("m.room.member", user_id)]
my_member_event = yield self.store.get_event(my_member_event_id)
if my_member_event.content["membership"] == "invite":
inviter_member_event_id = room_state_ids[room_id][
("m.room.member", my_member_event.sender)
]
inviter_member_event = yield self.store.get_event(
inviter_member_event_id
)
inviter_name = name_from_member_event(inviter_member_event)
if room_name is None:
defer.returnValue(INVITE_FROM_PERSON % {
"person": inviter_name,
"app": self.app_name
})
else:
defer.returnValue(INVITE_FROM_PERSON_TO_ROOM % {
"person": inviter_name,
"room": room_name,
"app": self.app_name,
})
sender_name = None
if len(notifs_by_room[room_id]) == 1:
# There is just the one notification, so give some detail
event = notif_events[notifs_by_room[room_id][0]["event_id"]]
if ("m.room.member", event.sender) in room_state_ids[room_id]:
state_event_id = room_state_ids[room_id][
("m.room.member", event.sender)
]
state_event = yield self.store.get_event(state_event_id)
sender_name = name_from_member_event(state_event)
if sender_name is not None and room_name is not None:
defer.returnValue(MESSAGE_FROM_PERSON_IN_ROOM % {
"person": sender_name,
"room": room_name,
"app": self.app_name,
})
elif sender_name is not None:
defer.returnValue(MESSAGE_FROM_PERSON % {
"person": sender_name,
"app": self.app_name,
})
else:
# There's more than one notification for this room, so just
# say there are several
if room_name is not None:
defer.returnValue(MESSAGES_IN_ROOM % {
"room": room_name,
"app": self.app_name,
})
else:
# If the room doesn't have a name, say who the messages
# are from explicitly to avoid, "messages in the Bob room"
sender_ids = list(set([
notif_events[n['event_id']].sender
for n in notifs_by_room[room_id]
]))
member_events = yield self.store.get_events([
room_state_ids[room_id][("m.room.member", s)]
for s in sender_ids
])
defer.returnValue(MESSAGES_FROM_PERSON % {
"person": descriptor_from_member_events(member_events.values()),
"app": self.app_name,
})
else:
# Stuff's happened in multiple different rooms
# ...but we still refer to the 'reason' room which triggered the mail
if reason['room_name'] is not None:
defer.returnValue(MESSAGES_IN_ROOM_AND_OTHERS % {
"room": reason['room_name'],
"app": self.app_name,
})
else:
# If the reason room doesn't have a name, say who the messages
# are from explicitly to avoid, "messages in the Bob room"
sender_ids = list(set([
notif_events[n['event_id']].sender
for n in notifs_by_room[reason['room_id']]
]))
member_events = yield self.store.get_events([
room_state_ids[room_id][("m.room.member", s)]
for s in sender_ids
])
defer.returnValue(MESSAGES_FROM_PERSON_AND_OTHERS % {
"person": descriptor_from_member_events(member_events.values()),
"app": self.app_name,
})
def make_room_link(self, room_id):
if self.hs.config.email_riot_base_url:
base_url = "%s/#/room" % (self.hs.config.email_riot_base_url)
elif self.app_name == "Vector":
# need /beta for Universal Links to work on iOS
base_url = "https://vector.im/beta/#/room"
else:
base_url = "https://matrix.to/#"
return "%s/%s" % (base_url, room_id)
def make_notif_link(self, notif):
if self.hs.config.email_riot_base_url:
return "%s/#/room/%s/%s" % (
self.hs.config.email_riot_base_url,
notif['room_id'], notif['event_id']
)
elif self.app_name == "Vector":
# need /beta for Universal Links to work on iOS
return "https://vector.im/beta/#/room/%s/%s" % (
notif['room_id'], notif['event_id']
)
else:
return "https://matrix.to/#/%s/%s" % (
notif['room_id'], notif['event_id']
)
def make_unsubscribe_link(self, user_id, app_id, email_address):
params = {
"access_token": self.macaroon_gen.generate_delete_pusher_token(user_id),
"app_id": app_id,
"pushkey": email_address,
}
# XXX: make r0 once API is stable
return "%s_matrix/client/unstable/pushers/remove?%s" % (
self.hs.config.public_baseurl,
urllib.parse.urlencode(params),
)
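# Illustrative example (assumed values, added comment): with a public_baseurl
# of "https://example.com/" (the format string above expects the trailing
# slash), app_id "m.email" and pushkey "user@example.com", the generated link
# has the shape
#   https://example.com/_matrix/client/unstable/pushers/remove?access_token=...&app_id=m.email&pushkey=user%40example.com
# (query parameter order may vary with dict ordering).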
def safe_markup(raw_html):
return jinja2.Markup(bleach.linkify(bleach.clean(
raw_html, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRS,
# bleach master has this, but it isn't released yet
# protocols=ALLOWED_SCHEMES,
strip=True
)))
def safe_text(raw_text):
"""
Process text: treat it as HTML but escape any tags (i.e. just escape the
HTML), then linkify it.
"""
return jinja2.Markup(bleach.linkify(bleach.clean(
raw_text, tags=[], attributes={},
strip=False
)))
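# Rough example (added comment): safe_text("see <b>this</b> at https://example.com")
# escapes the disallowed <b> tag (tags=[], strip=False) rather than removing it,
# and linkify wraps the bare URL in an <a> tag; safe_markup() above instead
# keeps the whitelisted ALLOWED_TAGS and strips everything else.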
def deduped_ordered_list(l):
seen = set()
ret = []
for item in l:
if item not in seen:
seen.add(item)
ret.append(item)
return ret
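# Example (added comment): deduped_ordered_list(['a', 'b', 'a', 'c']) returns
# ['a', 'b', 'c'], i.e. duplicates are dropped while first-seen order is kept.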
def string_ordinal_total(s):
tot = 0
for c in s:
tot += ord(c)
return tot
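# Example (added comment): string_ordinal_total("abc") == 97 + 98 + 99 == 294.
# The total is used above as a cheap, stable per-room/per-sender "hash" so a
# default image can be picked deterministically (hash % number of images).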
def format_ts_filter(value, format):
return time.strftime(format, time.localtime(value / 1000))
def load_jinja2_templates(config):
"""Load the jinja2 email templates from disk
Returns:
(notif_template_html, notif_template_text)
"""
logger.info("loading jinja2")
loader = jinja2.FileSystemLoader(config.email_template_dir)
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config)
notif_template_html = env.get_template(
config.email_notif_template_html
)
notif_template_text = env.get_template(
config.email_notif_template_text
)
return notif_template_html, notif_template_text
def _create_mxc_to_http_filter(config):
def mxc_to_http_filter(value, width, height, resize_method="crop"):
if value[0:6] != "mxc://":
return ""
serverAndMediaId = value[6:]
fragment = None
if '#' in serverAndMediaId:
(serverAndMediaId, fragment) = serverAndMediaId.split('#', 1)
fragment = "#" + fragment
params = {
"width": width,
"height": height,
"method": resize_method,
}
return "%s_matrix/media/v1/thumbnail/%s?%s%s" % (
config.public_baseurl,
serverAndMediaId,
urllib.parse.urlencode(params),
fragment or "",
)
return mxc_to_http_filter
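# Illustrative example (assumed values, added comment): with public_baseurl
# "https://example.com/", the filter turns "mxc://matrix.org/abc123" with
# width=32, height=32 into roughly
#   https://example.com/_matrix/media/v1/thumbnail/matrix.org/abc123?width=32&height=32&method=crop
# Non-"mxc://" values return an empty string, and any "#fragment" is re-appended.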
|
py
|
1a5a110c75c59549f06cf72a6b7fb75f0bc17883
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from scipy.io import wavfile
import tensorflow.compat.v2 as tf
from tensorflow_examples.lite.model_maker.core.data_util import audio_dataloader
from tensorflow_examples.lite.model_maker.core.task.model_spec import audio_spec
def write_file(root, filepath):
full_path = os.path.join(root, filepath)
os.makedirs(os.path.dirname(full_path), exist_ok=True)
with open(full_path, 'w') as f:
f.write('<content>')
def write_sample(root,
category,
file_name,
sample_rate,
duration_sec,
value,
dtype=np.int16):
os.makedirs(os.path.join(root, category), exist_ok=True)
xs = value * np.ones(shape=(int(sample_rate * duration_sec),), dtype=dtype)
wavfile.write(os.path.join(root, category, file_name), sample_rate, xs)
class MockSpec(audio_spec.BaseSpec):
def create_model(self):
return None
def run_classifier(self, *args, **kwargs):
return None
class Base(tf.test.TestCase):
def _get_folder_path(self, sub_folder_name):
folder_path = os.path.join(self.get_temp_dir(), sub_folder_name)
if os.path.exists(folder_path):
return
tf.compat.v1.logging.info('Test path: %s', folder_path)
os.mkdir(folder_path)
return folder_path
class LoadFromESC50Test(Base):
def test_spec(self):
folder_path = self._get_folder_path('test_examples_helper')
spec = audio_spec.YAMNetSpec()
audio_dataloader.DataLoader.from_esc50(spec, folder_path)
spec = audio_spec.BrowserFFTSpec()
with self.assertRaises(AssertionError):
audio_dataloader.DataLoader.from_esc50(spec, folder_path)
class LoadFromFolderTest(Base):
def test_spec(self):
folder_path = self._get_folder_path('test_examples_helper')
write_sample(folder_path, 'unknown', '2s.wav', 44100, 2, value=1)
spec = audio_spec.YAMNetSpec()
with self.assertRaises(AssertionError):
audio_dataloader.DataLoader.from_folder(spec, folder_path)
spec = audio_spec.BrowserFFTSpec()
audio_dataloader.DataLoader.from_folder(spec, folder_path)
def test_examples_helper(self):
root = self._get_folder_path('test_examples_helper')
write_file(root, 'a/1.wav')
write_file(root, 'a/2.wav')
write_file(root, 'b/1.wav')
write_file(root, 'b/README') # Ignored
write_file(root, 'a/b/c/d.wav') # Ignored
write_file(root, 'AUTHORS.md') # Ignored
write_file(root, 'temp.wav') # Ignored
def is_wav_files(name):
return name.endswith('.wav')
def fullpath(name):
return os.path.join(root, name)
helper = audio_dataloader.ExamplesHelper(root, is_wav_files)
self.assertEqual(helper.sorted_cateogries, ['a', 'b'])
self.assertEqual(
helper.examples_and_labels(),
([fullpath('a/1.wav'),
fullpath('a/2.wav'),
fullpath('b/1.wav')], ['a', 'a', 'b']))
self.assertEqual(
helper.examples_and_label_indices(),
([fullpath('a/1.wav'),
fullpath('a/2.wav'),
fullpath('b/1.wav')], [0, 0, 1]))
def test_no_audio_files_found(self):
folder_path = self._get_folder_path('test_no_audio_files_found')
write_sample(folder_path, 'unknown', '2s.bak', 44100, 2, value=1)
with self.assertRaisesRegexp(ValueError, 'No audio files found'):
spec = MockSpec(model_dir=folder_path)
audio_dataloader.DataLoader.from_folder(spec, folder_path)
def test_check_encoding(self):
folder_path = self._get_folder_path('test_check_encoding')
write_sample(
folder_path, 'unknown', '2s.wav', 44100, 2, value=0, dtype=np.uint8)
with self.assertRaisesRegexp(ValueError, '16 bit PCM'):
spec = MockSpec(model_dir=folder_path)
audio_dataloader.DataLoader.from_folder(spec, folder_path)
def test_from_folder(self):
folder_path = self._get_folder_path('test_from_folder')
write_sample(folder_path, 'background', '2s.wav', 44100, 2, value=0)
write_sample(folder_path, 'command1', '1s.wav', 44100, 1, value=1)
# Too short, skipped.
write_sample(folder_path, 'command1', '0.1s.wav', 44100, .1, value=2)
# Not long enough for 2 files, the remaining .5s will be skipped.
write_sample(folder_path, 'command2', '1.5s.wav', 44100, 1.5, value=3)
# Skipped, too short.
write_sample(folder_path, 'command0', '0.1s.wav', 4410, .1, value=4)
# Resampled, after resample, the content becomes [4 5 5 ... 4 5 4]
write_sample(folder_path, 'command0', '1.8s.wav', 4410, 1.8, value=5)
# Ignored due to wrong file extension
write_sample(folder_path, 'command0', '1.8s.bak', 4410, 1.8, value=6)
spec = MockSpec(model_dir=folder_path)
loader = audio_dataloader.DataLoader.from_folder(spec, folder_path)
self.assertEqual(len(loader), 5)
self.assertEqual(loader.index_to_label,
['background', 'command0', 'command1', 'command2'])
def is_cached(filename):
path = os.path.join(folder_path, 'cache', filename)
self.assertTrue(tf.io.gfile.exists(path))
sampling_rate, _ = wavfile.read(path)
self.assertEqual(sampling_rate, 44100)
is_cached('background/2s_0.wav')
is_cached('background/2s_1.wav')
is_cached('command1/1s_0.wav')
is_cached('command2/1.5s_0.wav')
is_cached('command0/1.8s_0.wav')
# Consistent dataset.
consistent_loader = audio_dataloader.DataLoader.from_folder(
spec, folder_path, shuffle=False)
expected_labels = iter(
['background', 'background', 'command0', 'command1', 'command2'])
expected_values = iter([0., 0., 4., 1., 3.])
for feature, label_idx in consistent_loader.gen_dataset().unbatch():
self.assertEqual(consistent_loader.index_to_label[label_idx],
next(expected_labels))
self.assertEqual(feature.shape, (1, spec.expected_waveform_len))
self.assertEqual(feature.dtype, tf.float32)
# tf.audio.decode_wav op scales the int16 PCM to float value between -1
# and 1 so the multiplier is 1 << 15
# Check tensorflow/core/lib/wav/wav_io.cc for the implementation.
self.assertNear(feature[0][0] * (1 << 15), next(expected_values), 1e-4)
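# Worked example of the scaling check above (added comment): an int16 sample
# written with value 4 is decoded by tf.audio.decode_wav to roughly
# 4 / 32768 ≈ 0.000122, so multiplying the float feature by (1 << 15) == 32768
# recovers approximately 4, matching the expected_values entries.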
if __name__ == '__main__':
tf.test.main()
|
py
|
1a5a112572665df8f7ce328ab36ad1143376b9f4
|
class SpaCyTagger:
def __init__(self, entity):
self.entity = entity
def get_spacy_tokens(self, tokenized_cleaned, sentence):
"""
Return a SpaCy training tuple based on the tokenized sentence.
tokenized_cleaned: A dataframe that contains a `range` column with the entity range in the sentence (start:end)
"""
entities = []
for t in tokenized_cleaned.itertuples():
entities.append((int(t.range.split(":")[0]), int(t.range.split(":")[1]), self.entity))
return (sentence, {"entities" : entities})
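# Illustrative usage (hypothetical data, added comment):
#   tagger = SpaCyTagger("PRODUCT")
#   # tokenized_cleaned needs a `range` column of "start:end" character
#   # offsets, e.g. a single row with range == "11:17" for the sentence below.
#   # tagger.get_spacy_tokens(tokenized_cleaned, "I bought a Widget yesterday")
#   # -> ("I bought a Widget yesterday", {"entities": [(11, 17, "PRODUCT")]})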
|
py
|
1a5a12955c508633b0b80bf05717d6a1f7e8eb97
|
from basicsr import train, test
import argparse
def parse_args(train=True):
parser = argparse.ArgumentParser(description="")
if train:
parser.add_argument('--opt', default='options/train/SRResNet_SRGAN/train_MSRResNet_x4.yml')
# parser.add_argument('--opt', default='options/train/CARN/train_CARN_x4.yml')
# parser.add_argument('--opt', default='options/train/CARN/train_CARNA_x4.yml')
# parser.add_argument('--opt', default='options/train/EDSR/train_EDSR_Mx4.yml')
# parser.add_argument('--opt', default='options/train/SRResNet_SRGAN/train_MSRResNetD_x4.yml')
# parser.add_argument('--opt', default='options/train/RFDN/train_RFDN_x4.yml')
# parser.add_argument('--opt', default='options/train/IMDN/train_IMDN_x4.yml')
else:
# parser.add_argument('--opt', default='options/test/SRResNet_SRGAN/test_MSRResNet_x4.yml')
# parser.add_argument('--opt', default='options/test/SRResNet_SRGAN/test_MSRResNet_x4_woGT.yml')
# parser.add_argument('--opt', default='options/test/CARN/test_CARN_x4.yml')
parser.add_argument('--opt', default='options/test/CARN/test_CARN_x4_woGT.yml')
parser.add_argument('--launcher', default='none')
parser.add_argument('--local_rank', type=int, default=0)
return parser
if __name__ == '__main__':
is_train = True
parser = parse_args(train=is_train)
args = parser.parse_args()
if is_train:
opt = train.parse_options(args=args)
train.main(opt)
else:
test_opt = test.parse_options(is_train=is_train, args=args)
test.main(test_opt)
|
py
|
1a5a12d9eb13e5c16ac00f6e77cd800bb0e1d3dd
|
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pytest
import rubrix as rb
from rubrix.labeling.text_classification import find_label_errors
from rubrix.labeling.text_classification.label_errors import (
MissingPredictionError,
NoRecordsError,
SortBy,
_construct_s_and_psx,
)
from tests.server.test_helpers import client, mocking_client
@pytest.fixture(
params=[False, True], ids=["single_label", "multi_label"], scope="module"
)
def records(request):
if request.param:
return [
rb.TextClassificationRecord(
inputs="test", annotation=anot, prediction=pred, multi_label=True, id=i
)
for i, anot, pred in zip(
range(2 * 6),
[["bad"], ["bad", "good"]] * 6,
[[("bad", 0.1), ("good", 0.9)], [("good", 0.9), ("bad", 0.01)]] * 6,
)
]
return [
rb.TextClassificationRecord(
inputs="test", annotation=anot, prediction=pred, id=i
)
for i, anot, pred in zip(
range(2 * 6),
["good", "bad"] * 6,
[[("bad", 0.9), ("good", 0.1)], [("good", 0.8), ("bad", 0.2)]] * 6,
)
]
def test_sort_by_enum():
with pytest.raises(ValueError, match="mock is not a valid SortBy"):
SortBy("mock")
def test_not_installed(monkeypatch):
monkeypatch.setitem(sys.modules, "cleanlab", None)
with pytest.raises(ModuleNotFoundError, match="pip install cleanlab"):
find_label_errors(None)
def test_no_records():
records = [
rb.TextClassificationRecord(inputs="test", prediction=[("mock", 0.0)]),
rb.TextClassificationRecord(inputs="test", annotation="test"),
]
with pytest.raises(
NoRecordsError, match="none of your records have a prediction AND annotation"
):
find_label_errors(records)
def test_multi_label_warning(caplog):
record = rb.TextClassificationRecord(
inputs="test", prediction=[("mock", 0.0)], annotation="mock"
)
find_label_errors([record], multi_label="True")
assert (
"You provided the kwarg 'multi_label', but it is determined automatically"
in caplog.text
)
@pytest.mark.parametrize(
"sort_by,expected",
[
("likelihood", "normalized_margin"),
("prediction", "prob_given_label"),
("none", None),
],
)
def test_sort_by(monkeypatch, sort_by, expected):
def mock_get_noise_indices(*args, **kwargs):
assert kwargs["sorted_index_method"] == expected
return []
monkeypatch.setattr(
"cleanlab.pruning.get_noise_indices",
mock_get_noise_indices,
)
record = rb.TextClassificationRecord(
inputs="mock", prediction=[("mock", 0.1)], annotation="mock"
)
find_label_errors(records=[record], sort_by=sort_by)
def test_kwargs(monkeypatch, records):
is_multi_label = records[0].multi_label
def mock_get_noise_indices(s, psx, **kwargs):
assert kwargs == {
"multi_label": is_multi_label,
"sorted_index_method": "normalized_margin",
"mock": "mock",
}
return []
monkeypatch.setattr(
"cleanlab.pruning.get_noise_indices",
mock_get_noise_indices,
)
with pytest.raises(
ValueError, match="'sorted_index_method' kwarg is not supported"
):
find_label_errors(records=records, sorted_index_method="mock")
find_label_errors(records=records, mock="mock")
def test_construct_s_and_psx(records):
import numpy as np
s, psx = _construct_s_and_psx(records[:2])
if records[0].multi_label:
s_expected = np.array(
[
list([0]),
list([0, 1]),
]
)
psx_expected = np.array(
[
[0.1, 0.9],
[0.01, 0.9],
]
)
else:
s_expected = np.array([1, 0])
psx_expected = np.array(
[
[0.9, 0.1],
[0.2, 0.8],
]
)
assert (s == s_expected).all()
assert (psx == psx_expected).all()
def test_missing_predictions():
records = [
rb.TextClassificationRecord(
inputs="test", annotation="mock", prediction=[("mock2", 0.1)]
)
]
with pytest.raises(
MissingPredictionError,
match="It seems predictions are missing for the label 'mock'",
):
_construct_s_and_psx(records)
records.append(
rb.TextClassificationRecord(
inputs="test", annotation="mock", prediction=[("mock", 0.1)]
)
)
with pytest.raises(
MissingPredictionError,
match="It seems a prediction for 'mock' is missing in the following record",
):
_construct_s_and_psx(records)
@pytest.fixture
def dataset(monkeypatch, records):
mocking_client(monkeypatch, client)
dataset = "dataset_for_label_errors"
rb.log(records, name=dataset)
yield dataset
rb.delete(dataset)
def test_find_label_errors_integration(dataset):
records = rb.load(dataset, as_pandas=False)
recs = find_label_errors(records)
assert [rec.id for rec in recs] == list(range(0, 11, 2)) + list(range(1, 12, 2))
|
py
|
1a5a13db0e6e22a959b151d6507c167f7f713222
|
#!/usr/bin/python
import os
ROOT_DIR = os.path.join('/root/azuredata')
# ------------------------- SHELLINABOX ----------------------------- #
SHELLINABOX_DOWNLOAD_URI = 'https://shellinabox.googlecode.com/files/'
SHELLINABOX_SRC = 'shellinabox-2.14'
SHELLINABOX_PREFIX = '/usr/local/shellinabox'
SHELLINABOX_CERT_DIR = '-c /var/lib/shellinabox'
SHELLINABOX_CERT_OWNER = '-u shellinabox -g shellinabox'
SHELLINABOX_DEFAULT_OPTS = '--user-css "Black on White:+/etc/shellinabox/options-enabled/00+Black on White.css,White On Black:-/etc/shellinabox/options-enabled/00_White On Black.css;Color Terminal:+/etc/shellinabox/options-enabled/01+Color Terminal.css,Monochrome:-/etc/shellinabox/options-enabled/01_Monochrome.css" --no-beep'
SHELLINABOX_PORT_RANGE = range(4200, 4204)
# ------------------------- GUACAMOLE ----------------------------- #
GUAC_CONF_DIR = '/etc/guacamole'
GUAC_LIB_DIR = '/var/lib/guacamole'
GUAC_CLASSPATH = os.path.join(GUAC_LIB_DIR, 'classpath')
GUAC_PROPERTIES = os.path.join(GUAC_CONF_DIR, 'guacamole.properties')
GUAC_VERSION = '0.9.2'
GUAC_SERVER_NAME = 'guacamole-server'
GUAC_CLIENT_NAME = 'guacamole-client'
GUAC_CLIENT_WAR_NAME = 'guacamole.war'
# no-auth configuration
EXTENSION_NOAUTH = 'guacamole-auth-noauth'
EXTENSION_NOAUTH_WITH_VERSION = EXTENSION_NOAUTH + '-' + GUAC_VERSION
NOAUTH_CONF = """auth-provider: net.sourceforge.guacamole.net.auth.noauth.NoAuthenticationProvider
noauth-config: /etc/guacamole/noauth-config.xml
"""
NOAUTH_CONF_FILE = 'noauth-config.xml'
NOAUTH_CONF_FILE_CONTENTS = """<configs>
<config name="WEB RDP" protocol="rdp">
<param name="hostname" value="localhost" />
<param name="port" value="3389" />
</config>
<config name="WEB SSH" protocol="ssh">
<param name="hostname" value="localhost" />
<param name="port" value="22" />
</config>
<config name="WEB VNC" protocol="vnc">
<param name="hostname" value="localhost" />
<param name="port" value="5901" />
</config>
</configs>
"""
# mysql-auth configuration
EXTENSION_MYSQLAUTH = 'guacamole-auth-mysql'
EXTENSION_MYSQLAUTH_WITH_VERSION = EXTENSION_MYSQLAUTH + '-' + GUAC_VERSION
MYSQLAUTH_CONF = """
"""
# Deployment-specific settings that may need to change
SRC_URI = 'https://binxia.blob.core.windows.net/stepping-stones-services/'
AZURE_VM_DOMAIN = '.cloudapp.net'
|
py
|
1a5a13dd4193068e66197c17814ebfd63cd39e7f
|
import asyncio
import importlib
import logging
import os
import sys
import threading
import traceback
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from concurrent.futures.process import BrokenProcessPool
from numbers import Number
from operator import add
from time import sleep
from unittest import mock
import psutil
import pytest
from tlz import first, pluck, sliding_window
import dask
from dask import delayed
from dask.system import CPU_COUNT
from dask.utils import tmpfile
import distributed
from distributed import (
Client,
Nanny,
Reschedule,
default_client,
get_client,
get_worker,
wait,
)
from distributed.comm.registry import backends
from distributed.comm.tcp import TCPBackend
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import CommClosedError, Status, rpc
from distributed.diagnostics import nvml
from distributed.diagnostics.plugin import PipInstall
from distributed.metrics import time
from distributed.scheduler import Scheduler
from distributed.utils import TimeoutError
from distributed.utils_test import (
TaskStateMetadataPlugin,
_LockedCommPool,
captured_logger,
dec,
div,
gen_cluster,
gen_test,
inc,
mul,
nodebug,
slowinc,
slowsum,
)
from distributed.worker import Worker, error_message, logger, parse_memory_limit
pytestmark = pytest.mark.ci1
@gen_cluster(nthreads=[])
async def test_worker_nthreads(s):
async with Worker(s.address) as w:
assert w.executor._max_workers == CPU_COUNT
@gen_cluster()
async def test_str(s, a, b):
assert a.address in str(a)
assert a.address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
assert str(a.executing_count) in repr(a)
@gen_cluster(nthreads=[])
async def test_identity(s):
async with Worker(s.address) as w:
ident = w.identity(None)
assert "Worker" in ident["type"]
assert ident["scheduler"] == s.address
assert isinstance(ident["nthreads"], int)
assert isinstance(ident["memory_limit"], Number)
@gen_cluster(client=True)
async def test_worker_bad_args(c, s, a, b):
class NoReprObj:
"""This object cannot be properly represented as a string."""
def __str__(self):
raise ValueError("I have no str representation.")
def __repr__(self):
raise ValueError("I have no repr representation.")
x = c.submit(NoReprObj, workers=a.address)
await wait(x)
assert not a.executing_count
assert a.data
def bad_func(*args, **kwargs):
1 / 0
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
super().__init__(*args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
"debug": [],
"info": [],
"warning": [],
"error": [],
"critical": [],
}
hdlr = MockLoggingHandler()
old_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(hdlr)
y = c.submit(bad_func, x, k=x, workers=b.address)
await wait(y)
assert not b.executing_count
assert y.status == "error"
# Make sure job died because of bad func and not because of bad
# argument.
with pytest.raises(ZeroDivisionError):
await y
tb = await y._traceback()
assert any("1 / 0" in line for line in pluck(3, traceback.extract_tb(tb)) if line)
assert "Compute Failed" in hdlr.messages["warning"][0]
logger.setLevel(old_level)
# Now we check that both workers are still alive.
xx = c.submit(add, 1, 2, workers=a.address)
yy = c.submit(add, 3, 4, workers=b.address)
results = await c._gather([xx, yy])
assert tuple(results) == (3, 7)
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
assert not os.path.exists(os.path.join(a.local_directory, "foobar.py"))
assert not os.path.exists(os.path.join(b.local_directory, "foobar.py"))
assert a.local_directory != b.local_directory
with rpc(a.address) as aa, rpc(b.address) as bb:
await asyncio.gather(
aa.upload_file(filename="foobar.py", data=b"x = 123"),
bb.upload_file(filename="foobar.py", data="x = 123"),
)
assert os.path.exists(os.path.join(a.local_directory, "foobar.py"))
assert os.path.exists(os.path.join(b.local_directory, "foobar.py"))
def g():
import foobar
return foobar.x
future = c.submit(g, workers=a.address)
result = await future
assert result == 123
await c.close()
await s.close(close_workers=True)
assert not os.path.exists(os.path.join(a.local_directory, "foobar.py"))
@pytest.mark.skip(reason="don't yet support uploading pyc files")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_upload_file_pyc(c, s, w):
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "foo.py"), mode="w") as f:
f.write("def f():\n return 123")
sys.path.append(dirname)
try:
import foo
assert foo.f() == 123
pyc = importlib.util.cache_from_source(os.path.join(dirname, "foo.py"))
assert os.path.exists(pyc)
await c.upload_file(pyc)
def g():
import foo
return foo.x
future = c.submit(g)
result = await future
assert result == 123
finally:
sys.path.remove(dirname)
@gen_cluster(client=True)
async def test_upload_egg(c, s, a, b):
eggname = "testegg-1.0.0-py3.4.egg"
local_file = __file__.replace("test_worker.py", eggname)
assert not os.path.exists(os.path.join(a.local_directory, eggname))
assert not os.path.exists(os.path.join(b.local_directory, eggname))
assert a.local_directory != b.local_directory
await c.upload_file(filename=local_file)
assert os.path.exists(os.path.join(a.local_directory, eggname))
assert os.path.exists(os.path.join(b.local_directory, eggname))
def g(x):
import testegg
return testegg.inc(x)
future = c.submit(g, 10, workers=a.address)
result = await future
assert result == 10 + 1
await c.close()
await s.close()
await a.close()
await b.close()
assert not os.path.exists(os.path.join(a.local_directory, eggname))
@gen_cluster(client=True)
async def test_upload_pyz(c, s, a, b):
pyzname = "mytest.pyz"
local_file = __file__.replace("test_worker.py", pyzname)
assert not os.path.exists(os.path.join(a.local_directory, pyzname))
assert not os.path.exists(os.path.join(b.local_directory, pyzname))
assert a.local_directory != b.local_directory
await c.upload_file(filename=local_file)
assert os.path.exists(os.path.join(a.local_directory, pyzname))
assert os.path.exists(os.path.join(b.local_directory, pyzname))
def g(x):
from mytest import mytest
return mytest.inc(x)
future = c.submit(g, 10, workers=a.address)
result = await future
assert result == 10 + 1
await c.close()
await s.close()
await a.close()
await b.close()
assert not os.path.exists(os.path.join(a.local_directory, pyzname))
@pytest.mark.xfail(reason="Still lose time to network I/O")
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
pytest.importorskip("crick")
await asyncio.sleep(0.05)
with rpc(a.address) as aa:
await aa.upload_file(filename="myfile.dat", data=b"0" * 100000000)
await asyncio.sleep(0.05)
assert a.digests["tick-duration"].components[0].max() < 0.050
@gen_cluster()
async def test_broadcast(s, a, b):
with rpc(s.address) as cc:
results = await cc.broadcast(msg={"op": "ping"})
assert results == {a.address: b"pong", b.address: b"pong"}
@gen_cluster(nthreads=[])
async def test_worker_with_port_zero(s):
async with Worker(s.address) as w:
assert isinstance(w.port, int)
assert w.port > 1024
@gen_cluster(nthreads=[])
async def test_worker_port_range(s):
port = "9867:9868"
async with Worker(s.address, port=port) as w1:
assert w1.port == 9867 # Selects first port in range
async with Worker(s.address, port=port) as w2:
assert w2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Worker"
): # No more ports left
async with Worker(s.address, port=port):
pass
@pytest.mark.slow
@gen_test(timeout=60)
async def test_worker_waits_for_scheduler():
w = Worker("127.0.0.1:8724")
try:
await asyncio.wait_for(w, 3)
except TimeoutError:
pass
else:
assert False
assert w.status not in (Status.closed, Status.running, Status.paused)
await w.close(timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_worker_task_data(c, s, w):
x = delayed(2)
xx = c.persist(x)
await wait(xx)
assert w.data[x.key] == 2
def test_error_message():
class MyException(Exception):
def __init__(self, a, b):
self.args = (a + b,)
def __str__(self):
return "MyException(%s)" % self.args
msg = error_message(MyException("Hello", "World!"))
assert "Hello" in str(msg["exception"])
max_error_len = 100
with dask.config.set({"distributed.admin.max-error-length": max_error_len}):
msg = error_message(RuntimeError("-" * max_error_len))
assert len(msg["exception_text"]) <= max_error_len + 30
assert len(msg["exception_text"]) < max_error_len * 2
msg = error_message(RuntimeError("-" * max_error_len * 20))
max_error_len = 1000000
with dask.config.set({"distributed.admin.max-error-length": max_error_len}):
msg = error_message(RuntimeError("-" * max_error_len * 2))
assert len(msg["exception_text"]) > 10100 # default + 100
@gen_cluster(client=True)
async def test_chained_error_message(c, s, a, b):
def chained_exception_fn():
class MyException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "MyException(%s)" % self.msg
exception = MyException("Foo")
inner_exception = MyException("Bar")
try:
raise inner_exception
except Exception as e:
raise exception from e
f = c.submit(chained_exception_fn)
try:
await f
except Exception as e:
assert e.__cause__ is not None
assert "Bar" in str(e.__cause__)
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x, y = await c.scatter(["x", "y"], workers=[b.address])
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: [b.address], y.key: [b.address]})
assert resp == {"status": "OK"}
assert a.data[x.key] == b.data[x.key] == "x"
assert a.data[y.key] == b.data[y.key] == "y"
@gen_cluster(client=True)
async def test_gather_missing_keys(c, s, a, b):
"""A key is missing. Other keys are gathered successfully."""
x = await c.scatter("x", workers=[b.address])
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: [b.address], "y": [b.address]})
assert resp == {"status": "partial-fail", "keys": {"y": (b.address,)}}
assert a.data[x.key] == b.data[x.key] == "x"
@gen_cluster(client=True, worker_kwargs={"timeout": "100ms"})
async def test_gather_missing_workers(c, s, a, b):
"""A worker owning the only copy of a key is missing.
Keys from other workers are gathered successfully.
"""
assert b.address.startswith("tcp://127.0.0.1:")
bad_addr = "tcp://127.0.0.1:12345"
x = await c.scatter("x", workers=[b.address])
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: [b.address], "y": [bad_addr]})
assert resp == {"status": "partial-fail", "keys": {"y": (bad_addr,)}}
assert a.data[x.key] == b.data[x.key] == "x"
@pytest.mark.parametrize("missing_first", [False, True])
@gen_cluster(client=True, worker_kwargs={"timeout": "100ms"})
async def test_gather_missing_workers_replicated(c, s, a, b, missing_first):
"""A worker owning a redundant copy of a key is missing.
The key is successfully gathered from other workers.
"""
assert b.address.startswith("tcp://127.0.0.1:")
x = await c.scatter("x", workers=[b.address])
bad_addr = "tcp://127.0.0.1:12345"
# Order matters! Test both
addrs = [bad_addr, b.address] if missing_first else [b.address, bad_addr]
with rpc(a.address) as aa:
resp = await aa.gather(who_has={x.key: addrs})
assert resp == {"status": "OK"}
assert a.data[x.key] == b.data[x.key] == "x"
@gen_cluster(nthreads=[])
async def test_io_loop(s):
async with Worker(s.address, loop=s.loop) as w:
assert w.io_loop is s.loop
@gen_cluster(client=True, nthreads=[])
async def test_spill_to_disk(c, s):
np = pytest.importorskip("numpy")
w = await Worker(
s.address,
loop=s.loop,
memory_limit=1200 / 0.6,
memory_pause_fraction=None,
memory_spill_fraction=None,
)
x = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="x")
await wait(x)
y = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="y")
await wait(y)
assert set(w.data) == {x.key, y.key}
assert set(w.data.memory) == {x.key, y.key}
z = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="z")
await wait(z)
assert set(w.data) == {x.key, y.key, z.key}
assert set(w.data.memory) == {y.key, z.key}
assert set(w.data.disk) == {x.key}
await x
assert set(w.data.memory) == {x.key, z.key}
assert set(w.data.disk) == {y.key}
await w.close()
@gen_cluster(client=True)
async def test_access_key(c, s, a, b):
def f(i):
from distributed.worker import thread_state
return thread_state.key
futures = [c.submit(f, i, key="x-%d" % i) for i in range(20)]
results = await c._gather(futures)
assert list(results) == ["x-%d" % i for i in range(20)]
@gen_cluster(client=True)
async def test_run_dask_worker(c, s, a, b):
def f(dask_worker=None):
return dask_worker.id
response = await c._run(f)
assert response == {a.address: a.id, b.address: b.id}
@gen_cluster(client=True)
async def test_run_coroutine_dask_worker(c, s, a, b):
async def f(dask_worker=None):
await asyncio.sleep(0.001)
return dask_worker.id
response = await c.run(f)
assert response == {a.address: a.id, b.address: b.id}
@gen_cluster(client=True, nthreads=[])
async def test_Executor(c, s):
with ThreadPoolExecutor(2) as e:
async with Worker(s.address, executor=e) as w:
assert w.executor is e
future = c.submit(inc, 1)
result = await future
assert result == 2
assert e._threads # had to do some work
@pytest.mark.skip(
reason="Other tests leak memory, so process-level checks trigger immediately"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)],
timeout=30,
worker_kwargs={"memory_limit": 10e6},
)
async def test_spill_by_default(c, s, w):
da = pytest.importorskip("dask.array")
x = da.ones(int(10e6 * 0.7), chunks=1e6, dtype="u1")
y = c.persist(x)
await wait(y)
assert len(w.data.disk) # something is on disk
@gen_cluster(nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False})
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != Status.closed:
await asyncio.sleep(0.01)
assert time() < start + 5
@gen_cluster(nthreads=[])
async def test_memory_limit_auto(s):
async with Worker(s.address, nthreads=1) as a, Worker(
s.address, nthreads=2
) as b, Worker(s.address, nthreads=100) as c, Worker(s.address, nthreads=200) as d:
assert isinstance(a.memory_limit, Number)
assert isinstance(b.memory_limit, Number)
if CPU_COUNT > 1:
assert a.memory_limit < b.memory_limit
assert c.memory_limit == d.memory_limit
@gen_cluster(client=True)
async def test_inter_worker_communication(c, s, a, b):
[x, y] = await c._scatter([1, 2], workers=a.address)
future = c.submit(add, x, y, workers=b.address)
result = await future
assert result == 3
@gen_cluster(client=True)
async def test_clean(c, s, a, b):
x = c.submit(inc, 1, workers=a.address)
y = c.submit(inc, x, workers=b.address)
await y
collections = [
a.tasks,
a.data,
a.threads,
]
for c in collections:
assert c
x.release()
y.release()
while x.key in a.tasks:
await asyncio.sleep(0.01)
for c in collections:
assert not c
@gen_cluster(client=True)
async def test_message_breakup(c, s, a, b):
n = 100000
a.target_message_size = 10 * n
b.target_message_size = 10 * n
xs = [c.submit(mul, b"%d" % i, n, workers=a.address) for i in range(30)]
y = c.submit(lambda *args: None, xs, workers=b.address)
await y
assert 2 <= len(b.incoming_transfer_log) <= 20
assert 2 <= len(a.outgoing_transfer_log) <= 20
assert all(msg["who"] == b.address for msg in a.outgoing_transfer_log)
assert all(msg["who"] == a.address for msg in a.incoming_transfer_log)
@gen_cluster(client=True)
async def test_types(c, s, a, b):
assert all(ts.type is None for ts in a.tasks.values())
assert all(ts.type is None for ts in b.tasks.values())
x = c.submit(inc, 1, workers=a.address)
await wait(x)
assert a.tasks[x.key].type == int
y = c.submit(inc, x, workers=b.address)
await wait(y)
assert b.tasks[x.key].type == int
assert b.tasks[y.key].type == int
await c._cancel(y)
start = time()
while y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert y.key not in b.tasks
@gen_cluster()
async def test_system_monitor(s, a, b):
assert b.monitor
b.monitor.update()
@gen_cluster(
client=True, nthreads=[("127.0.0.1", 2, {"resources": {"A": 1}}), ("127.0.0.1", 1)]
)
async def test_restrictions(c, s, a, b):
# Worker has resource available
assert a.available_resources == {"A": 1}
# Resource restrictions
x = c.submit(inc, 1, resources={"A": 1})
await x
ts = a.tasks[x.key]
assert ts.resource_restrictions == {"A": 1}
await c._cancel(x)
while ts.state != "memory":
# Resource should be unavailable while task isn't finished
assert a.available_resources == {"A": 0}
await asyncio.sleep(0.01)
# Resource restored after task is in memory
assert a.available_resources["A"] == 1
@gen_cluster(client=True)
async def test_clean_nbytes(c, s, a, b):
L = [delayed(inc)(i) for i in range(10)]
for i in range(5):
L = [delayed(add)(x, y) for x, y in sliding_window(2, L)]
total = delayed(sum)(L)
future = c.compute(total)
await wait(future)
await asyncio.sleep(1)
assert (
len(list(filter(None, [ts.nbytes for ts in a.tasks.values()])))
+ len(list(filter(None, [ts.nbytes for ts in b.tasks.values()])))
== 1
)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 20)
async def test_gather_many_small(c, s, a, *workers):
"""If the dependencies of a given task are very small, do not limit the
number of concurrent outgoing connections
"""
a.total_out_connections = 2
futures = await c._scatter(list(range(100)))
assert all(w.data for w in workers)
def f(*args):
return 10
future = c.submit(f, *futures, workers=a.address)
await wait(future)
types = list(pluck(0, a.log))
req = [i for i, t in enumerate(types) if t == "request-dep"]
recv = [i for i, t in enumerate(types) if t == "receive-dep"]
assert min(recv) > max(req)
assert a.comm_nbytes == 0
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_multiple_transfers(c, s, w1, w2, w3):
x = c.submit(inc, 1, workers=w1.address)
y = c.submit(inc, 2, workers=w2.address)
z = c.submit(add, x, y, workers=w3.address)
await wait(z)
r = w3.tasks[z.key].startstops
transfers = [t for t in r if t["action"] == "transfer"]
assert len(transfers) == 2
@pytest.mark.xfail(reason="very high flakiness")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_share_communication(c, s, w1, w2, w3):
x = c.submit(mul, b"1", int(w3.target_message_size + 1), workers=w1.address)
y = c.submit(mul, b"2", int(w3.target_message_size + 1), workers=w2.address)
await wait([x, y])
await c._replicate([x, y], workers=[w1.address, w2.address])
z = c.submit(add, x, y, workers=w3.address)
await wait(z)
assert len(w3.incoming_transfer_log) == 2
assert w1.outgoing_transfer_log
assert w2.outgoing_transfer_log
@pytest.mark.xfail(reason="very high flakiness")
@gen_cluster(client=True)
async def test_dont_overlap_communications_to_same_worker(c, s, a, b):
x = c.submit(mul, b"1", int(b.target_message_size + 1), workers=a.address)
y = c.submit(mul, b"2", int(b.target_message_size + 1), workers=a.address)
await wait([x, y])
z = c.submit(add, x, y, workers=b.address)
await wait(z)
assert len(b.incoming_transfer_log) == 2
l1, l2 = b.incoming_transfer_log
assert l1["stop"] < l2["start"]
@gen_cluster(client=True)
async def test_log_exception_on_failed_task(c, s, a, b):
with captured_logger("distributed.worker") as logger:
future = c.submit(div, 1, 0)
await wait(future)
await asyncio.sleep(0.1)
text = logger.getvalue()
assert "ZeroDivisionError" in text
assert "Exception" in text
@gen_cluster(client=True)
async def test_clean_up_dependencies(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(2)
xx = delayed(inc)(x)
yy = delayed(inc)(y)
z = delayed(add)(xx, yy)
zz = c.persist(z)
await wait(zz)
while len(a.data) + len(b.data) > 1:
await asyncio.sleep(0.01)
assert set(a.data) | set(b.data) == {zz.key}
@gen_cluster(client=True)
async def test_hold_onto_dependents(c, s, a, b):
x = c.submit(inc, 1, workers=a.address)
y = c.submit(inc, x, workers=b.address)
await wait(y)
assert x.key in b.data
await c._cancel(y)
while x.key not in b.data:
await asyncio.sleep(0.1)
# Normally takes >2s but it has been observed to take >30s occasionally
@pytest.mark.slow
@gen_test(timeout=120)
async def test_worker_death_timeout():
w = Worker("tcp://127.0.0.1:12345", death_timeout=0.1)
with pytest.raises(TimeoutError) as info:
await w
assert "Worker" in str(info.value)
assert "timed out" in str(info.value) or "failed to start" in str(info.value)
assert w.status == Status.closed
@gen_cluster(client=True)
async def test_stop_doing_unnecessary_work(c, s, a, b):
futures = c.map(slowinc, range(1000), delay=0.01)
await asyncio.sleep(0.1)
del futures
start = time()
while a.executing_count:
await asyncio.sleep(0.01)
assert time() - start < 0.5
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_priorities(c, s, w):
values = []
for i in range(10):
a = delayed(slowinc)(i, dask_key_name="a-%d" % i, delay=0.01)
a1 = delayed(inc)(a, dask_key_name="a1-%d" % i)
a2 = delayed(inc)(a1, dask_key_name="a2-%d" % i)
b1 = delayed(dec)(a, dask_key_name="b1-%d" % i) # <<-- least favored
values.append(a2)
values.append(b1)
futures = c.compute(values)
await wait(futures)
log = [
t[0]
for t in w.log
if t[1] == "executing" and t[2] == "memory" and not t[0].startswith("finalize")
]
assert any(key.startswith("b1") for key in log[: len(log) // 2])
@gen_cluster(client=True)
async def test_heartbeats(c, s, a, b):
x = s.workers[a.address].last_seen
start = time()
await asyncio.sleep(a.periodic_callbacks["heartbeat"].callback_time / 1000 + 0.1)
while s.workers[a.address].last_seen == x:
await asyncio.sleep(0.01)
assert time() < start + 2
assert a.periodic_callbacks["heartbeat"].callback_time < 1000
@pytest.mark.parametrize("worker", [Worker, Nanny])
def test_worker_dir(worker):
with tmpfile() as fn:
@gen_cluster(client=True, worker_kwargs={"local_directory": fn})
async def test_worker_dir(c, s, a, b):
directories = [w.local_directory for w in s.workers.values()]
assert all(d.startswith(fn) for d in directories)
assert len(set(directories)) == 2 # distinct
test_worker_dir()
@gen_cluster(nthreads=[])
async def test_false_worker_dir(s):
async with Worker(s.address, local_directory="") as w:
local_directory = w.local_directory
cwd = os.getcwd()
assert os.path.dirname(local_directory) == os.path.join(cwd, "dask-worker-space")
@gen_cluster(client=True)
async def test_dataframe_attribute_error(c, s, a, b):
class BadSize:
def __init__(self, data):
self.data = data
def __sizeof__(self):
raise TypeError("Hello")
future = c.submit(BadSize, 123)
result = await future
assert result.data == 123
@gen_cluster(client=True)
async def test_fail_write_to_disk(c, s, a, b):
class Bad:
def __getstate__(self):
raise TypeError()
def __sizeof__(self):
return int(100e9)
future = c.submit(Bad)
await wait(future)
assert future.status == "error"
with pytest.raises(TypeError):
await future
futures = c.map(inc, range(10))
results = await c._gather(futures)
assert results == list(map(inc, range(10)))
@pytest.mark.skip(reason="Our logic here is faulty")
@gen_cluster(
nthreads=[("127.0.0.1", 2)], client=True, worker_kwargs={"memory_limit": 10e9}
)
async def test_fail_write_many_to_disk(c, s, a):
a.validate = False
await asyncio.sleep(0.1)
assert a.status == Status.running
class Bad:
def __init__(self, x):
pass
def __getstate__(self):
raise TypeError()
def __sizeof__(self):
return int(2e9)
futures = c.map(Bad, range(11))
future = c.submit(lambda *args: 123, *futures)
await wait(future)
with pytest.raises(Exception) as info:
await future
# workers still operational
result = await c.submit(inc, 1, workers=a.address)
assert result == 2
@gen_cluster()
async def test_pid(s, a, b):
assert s.workers[a.address].pid == os.getpid()
@gen_cluster(client=True)
async def test_get_client(c, s, a, b):
def f(x):
cc = get_client()
future = cc.submit(inc, x)
return future.result()
assert default_client() is c
future = c.submit(f, 10, workers=a.address)
result = await future
assert result == 11
assert a._client
assert not b._client
assert a._client is c
assert default_client() is c
a_client = a._client
for i in range(10):
await wait(c.submit(f, i))
assert a._client is a_client
def test_get_client_sync(client):
def f(x):
cc = get_client()
future = cc.submit(inc, x)
return future.result()
future = client.submit(f, 10)
assert future.result() == 11
@gen_cluster(client=True)
async def test_get_client_coroutine(c, s, a, b):
async def f():
client = await get_client()
future = client.submit(inc, 10)
result = await future
return result
results = await c.run(f)
assert results == {a.address: 11, b.address: 11}
def test_get_client_coroutine_sync(client, s, a, b):
async def f():
client = await get_client()
future = client.submit(inc, 10)
result = await future
return result
results = client.run(f)
assert results == {a["address"]: 11, b["address"]: 11}
@gen_cluster()
async def test_global_workers(s, a, b):
n = len(Worker._instances)
w = first(Worker._instances)
assert w is a or w is b
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@gen_cluster(nthreads=[])
async def test_worker_fds(s):
proc = psutil.Process()
    before = proc.num_fds()
async with Worker(s.address, loop=s.loop):
assert proc.num_fds() > before
while proc.num_fds() > before:
await asyncio.sleep(0.01)
@gen_cluster(nthreads=[])
async def test_service_hosts_match_worker(s):
async with Worker(s.address, host="tcp://0.0.0.0") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
async with Worker(
s.address, host="tcp://127.0.0.1", dashboard_address="0.0.0.0:0"
) as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
async with Worker(s.address, host="tcp://127.0.0.1") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
# See what happens with e.g. `dask-worker --listen-address tcp://:8811`
async with Worker(s.address, host="") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
# Address must be a connectable address. 0.0.0.0 is not!
address_all = w.address.rsplit(":", 1)[0]
assert address_all in ("tcp://[::1]", "tcp://127.0.0.1")
# Check various malformed IPv6 addresses
# Since these hostnames get passed to distributed.comm.address_from_user_args,
# bracketing is mandatory for IPv6.
with pytest.raises(ValueError) as exc:
async with Worker(s.address, host="::") as w:
pass
assert "bracketed" in str(exc)
with pytest.raises(ValueError) as exc:
async with Worker(s.address, host="tcp://::1") as w:
pass
assert "bracketed" in str(exc)
@gen_cluster(nthreads=[])
async def test_start_services(s):
async with Worker(s.address, dashboard_address=1234) as w:
assert w.http_server.port == 1234
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
async with Scheduler(scheduler_file=fn, dashboard_address=":0") as s:
async with Worker(scheduler_file=fn) as w:
assert set(s.workers) == {w.address}
@gen_cluster(client=True)
async def test_scheduler_delay(c, s, a, b):
old = a.scheduler_delay
assert abs(a.scheduler_delay) < 0.6
assert abs(b.scheduler_delay) < 0.6
await asyncio.sleep(a.periodic_callbacks["heartbeat"].callback_time / 1000 + 0.6)
assert a.scheduler_delay != old
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True)
async def test_statistical_profiling(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.1)
await wait(futures)
profile = a.profile_keys["slowinc"]
assert profile["count"]
@pytest.mark.slow
@nodebug
@gen_cluster(
client=True,
timeout=30,
config={
"distributed.worker.profile.interval": "1ms",
"distributed.worker.profile.cycle": "100ms",
},
)
async def test_statistical_profiling_2(c, s, a, b):
da = pytest.importorskip("dask.array")
while True:
x = da.random.random(1000000, chunks=(10000,))
y = (x + x * 2) - x.sum().persist()
await wait(y)
profile = await a.get_profile()
text = str(profile)
if profile["count"] and "sum" in text and "random" in text:
break
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
worker_kwargs={"memory_monitor_interval": 10},
)
async def test_robust_to_bad_sizeof_estimates(c, s, a):
np = pytest.importorskip("numpy")
memory = psutil.Process().memory_info().rss
a.memory_limit = memory / 0.7 + 400e6
class BadAccounting:
def __init__(self, data):
self.data = data
def __sizeof__(self):
return 10
def f(n):
x = np.ones(int(n), dtype="u1")
result = BadAccounting(x)
return result
futures = c.map(f, [100e6] * 8, pure=False)
start = time()
while not a.data.disk:
await asyncio.sleep(0.1)
assert time() < start + 5
@pytest.mark.slow
@gen_cluster(
nthreads=[("127.0.0.1", 2)],
client=True,
worker_kwargs={
"memory_monitor_interval": 10,
"memory_spill_fraction": False, # don't spill
"memory_target_fraction": False,
"memory_pause_fraction": 0.5,
},
)
async def test_pause_executor(c, s, a):
memory = psutil.Process().memory_info().rss
a.memory_limit = memory / 0.5 + 200e6
np = pytest.importorskip("numpy")
def f():
x = np.ones(int(400e6), dtype="u1")
sleep(1)
with captured_logger(logging.getLogger("distributed.worker")) as logger:
future = c.submit(f)
futures = c.map(slowinc, range(30), delay=0.1)
while a.status != Status.paused:
await asyncio.sleep(0.01)
out = logger.getvalue()
assert "memory" in out.lower()
assert "pausing" in out.lower()
assert sum(f.status == "finished" for f in futures) < 4
await wait(futures)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "50 ms"})
async def test_statistical_profiling_cycle(c, s, a, b):
futures = c.map(slowinc, range(20), delay=0.05)
await wait(futures)
await asyncio.sleep(0.01)
end = time()
assert len(a.profile_history) > 3
x = await a.get_profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await a.get_profile(start=0, stop=time() + 10)
recent = a.profile_recent["count"]
actual = sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
x2 = await a.get_profile(start=0, stop=time() + 10)
assert x["count"] <= actual <= x2["count"]
y = await a.get_profile(start=end - 0.300, stop=time())
assert 0 < y["count"] <= x["count"]
@gen_cluster(client=True)
async def test_get_current_task(c, s, a, b):
def some_name():
return get_worker().get_current_task()
result = await c.submit(some_name)
assert result.startswith("some_name")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_reschedule(c, s, a, b):
s.extensions["stealing"]._pc.stop()
a_address = a.address
def f(x):
sleep(0.1)
if get_worker().address == a_address:
raise Reschedule()
futures = c.map(f, range(4))
futures2 = c.map(slowinc, range(10), delay=0.1, workers=a.address)
await wait(futures)
assert all(f.key in b.data for f in futures)
@gen_cluster(nthreads=[])
async def test_deque_handler(s):
from distributed.worker import logger
async with Worker(s.address) as w:
deque_handler = w._deque_handler
logger.info("foo456")
assert deque_handler.deque
msg = deque_handler.deque[-1]
assert "distributed.worker" in deque_handler.format(msg)
assert any(msg.msg == "foo456" for msg in deque_handler.deque)
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
worker = await Worker(
s.address, loop=s.loop, memory_limit=0, memory_monitor_interval=10
)
assert type(worker.data) is dict
assert "memory" not in worker.periodic_callbacks
future = c.submit(inc, 1)
assert (await future) == 2
await asyncio.sleep(worker.memory_monitor_interval / 1000)
await c.submit(inc, 2) # worker doesn't pause
await worker.close()
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
config={
"distributed.worker.memory.spill": False,
"distributed.worker.memory.target": False,
},
)
async def test_dict_data_if_no_spill_to_disk(s, w):
assert type(w.data) is dict
def test_get_worker_name(client):
def f():
get_client().submit(inc, 1).result()
client.run(f)
def func(dask_scheduler):
return list(dask_scheduler.clients)
start = time()
while not any("worker" in n for n in client.run_on_scheduler(func)):
sleep(0.1)
assert time() < start + 10
@gen_cluster(nthreads=[("127.0.0.1", 1)], worker_kwargs={"memory_limit": "2e3 MB"})
async def test_parse_memory_limit(s, w):
assert w.memory_limit == 2e9
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
worker = await Worker(loop=s.loop)
assert worker.scheduler.address == s.address
await worker.close()
@pytest.mark.xfail(reason="very high flakiness")
@pytest.mark.slow
@gen_cluster(client=True)
async def test_wait_for_outgoing(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(10000000)
future = await c.scatter(x, workers=a.address)
y = c.submit(inc, future, workers=b.address)
await wait(y)
assert len(b.incoming_transfer_log) == len(a.outgoing_transfer_log) == 1
bb = b.incoming_transfer_log[0]["duration"]
aa = a.outgoing_transfer_log[0]["duration"]
ratio = aa / bb
assert 1 / 3 < ratio < 3
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 1), ("127.0.0.2", 1)], client=True
)
async def test_prefer_gather_from_local_address(c, s, w1, w2, w3):
x = await c.scatter(123, workers=[w1.address, w3.address], broadcast=True)
y = c.submit(inc, x, workers=[w2.address])
await wait(y)
assert any(d["who"] == w2.address for d in w1.outgoing_transfer_log)
assert not any(d["who"] == w2.address for d in w3.outgoing_transfer_log)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)] * 20,
timeout=30,
config={"distributed.worker.connections.incoming": 1},
)
async def test_avoid_oversubscription(c, s, *workers):
np = pytest.importorskip("numpy")
x = c.submit(np.random.random, 1000000, workers=[workers[0].address])
await wait(x)
futures = [c.submit(len, x, pure=False, workers=[w.address]) for w in workers[1:]]
await wait(futures)
# Original worker not responsible for all transfers
assert len(workers[0].outgoing_transfer_log) < len(workers) - 2
# Some other workers did some work
assert len([w for w in workers if len(w.outgoing_transfer_log) > 0]) >= 3
@gen_cluster(client=True, worker_kwargs={"metrics": {"my_port": lambda w: w.port}})
async def test_custom_metrics(c, s, a, b):
assert s.workers[a.address].metrics["my_port"] == a.port
assert s.workers[b.address].metrics["my_port"] == b.port
@gen_cluster(client=True)
async def test_register_worker_callbacks(c, s, a, b):
# preload function to run
def mystartup(dask_worker):
dask_worker.init_variable = 1
def mystartup2():
import os
os.environ["MY_ENV_VALUE"] = "WORKER_ENV_VALUE"
return "Env set."
# Check that preload function has been run
def test_import(dask_worker):
return hasattr(dask_worker, "init_variable")
# and dask_worker.init_variable == 1
def test_startup2():
import os
return os.getenv("MY_ENV_VALUE", None) == "WORKER_ENV_VALUE"
# Nothing has been run yet
result = await c.run(test_import)
assert list(result.values()) == [False] * 2
result = await c.run(test_startup2)
assert list(result.values()) == [False] * 2
# Start a worker and check that startup is not run
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [False]
await worker.close()
# Add a preload function
response = await c.register_worker_callbacks(setup=mystartup)
assert len(response) == 2
# Check it has been ran on existing worker
result = await c.run(test_import)
assert list(result.values()) == [True] * 2
# Start a worker and check it is ran on it
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [True]
await worker.close()
# Register another preload function
response = await c.register_worker_callbacks(setup=mystartup2)
assert len(response) == 2
# Check it has been run
result = await c.run(test_startup2)
assert list(result.values()) == [True] * 2
# Start a worker and check it is ran on it
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [True]
result = await c.run(test_startup2, workers=[worker.address])
assert list(result.values()) == [True]
await worker.close()
@gen_cluster(client=True)
async def test_register_worker_callbacks_err(c, s, a, b):
with pytest.raises(ZeroDivisionError):
await c.register_worker_callbacks(setup=lambda: 1 / 0)
@gen_cluster(nthreads=[])
async def test_data_types(s):
w = await Worker(s.address, data=dict)
assert isinstance(w.data, dict)
await w.close()
data = dict()
w = await Worker(s.address, data=data)
assert w.data is data
await w.close()
class Data(dict):
def __init__(self, x, y):
self.x = x
self.y = y
w = await Worker(s.address, data=(Data, {"x": 123, "y": 456}))
assert w.data.x == 123
assert w.data.y == 456
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Worker(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
@gen_cluster(nthreads=[])
async def test_local_directory_make_new_directory(s):
with tmpfile() as fn:
w = await Worker(s.address, local_directory=os.path.join(fn, "foo", "bar"))
assert w.local_directory.startswith(fn)
assert "foo" in w.local_directory
assert "dask-worker-space" in w.local_directory
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(nthreads=[], client=True)
async def test_host_address(c, s):
w = await Worker(s.address, host="127.0.0.2")
assert "127.0.0.2" in w.address
await w.close()
n = await Nanny(s.address, host="127.0.0.3")
assert "127.0.0.3" in n.address
assert "127.0.0.3" in n.worker_address
await n.close()
def test_resource_limit(monkeypatch):
assert parse_memory_limit("250MiB", 1, total_cores=1) == 1024 * 1024 * 250
new_limit = 1024 * 1024 * 200
import distributed.worker
monkeypatch.setattr(distributed.system, "MEMORY_LIMIT", new_limit)
assert parse_memory_limit("250MiB", 1, total_cores=1) == new_limit
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_interface_async(cleanup, loop, Worker):
from distributed.utils import get_ip_interface
psutil = pytest.importorskip("psutil")
if_names = sorted(psutil.net_if_addrs())
for if_name in if_names:
try:
ipv4_addr = get_ip_interface(if_name)
except ValueError:
pass
else:
if ipv4_addr == "127.0.0.1":
break
else:
pytest.skip(
"Could not find loopback interface. "
"Available interfaces are: %s." % (if_names,)
)
async with Scheduler(dashboard_address=":0", interface=if_name) as s:
assert s.address.startswith("tcp://127.0.0.1")
async with Worker(s.address, interface=if_name) as w:
assert w.address.startswith("tcp://127.0.0.1")
assert w.ip == "127.0.0.1"
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert "tcp://127.0.0.1" in info["address"]
assert all("127.0.0.1" == d["host"] for d in info["workers"].values())
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_protocol_from_scheduler_address(cleanup, Worker):
pytest.importorskip("ucp")
async with Scheduler(protocol="ucx", dashboard_address=":0") as s:
assert s.address.startswith("ucx://")
async with Worker(s.address) as w:
assert w.address.startswith("ucx://")
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert info["address"].startswith("ucx://")
@pytest.mark.asyncio
async def test_host_uses_scheduler_protocol(cleanup, monkeypatch):
# Ensure worker uses scheduler's protocol to determine host address, not the default scheme
# See https://github.com/dask/distributed/pull/4883
class BadBackend(TCPBackend):
def get_address_host(self, loc):
raise ValueError("asdf")
monkeypatch.setitem(backends, "foo", BadBackend())
with dask.config.set({"distributed.comm.default-scheme": "foo"}):
async with Scheduler(protocol="tcp", dashboard_address=":0") as s:
async with Worker(s.address):
# Ensure that worker is able to properly start up
# without BadBackend.get_address_host raising a ValueError
pass
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_worker_listens_on_same_interface_by_default(cleanup, Worker):
async with Scheduler(host="localhost", dashboard_address=":0") as s:
assert s.ip in {"127.0.0.1", "localhost"}
async with Worker(s.address) as w:
assert s.ip == w.ip
@gen_cluster(client=True)
async def test_close_gracefully(c, s, a, b):
futures = c.map(slowinc, range(200), delay=0.1)
while not b.data:
await asyncio.sleep(0.1)
mem = set(b.data)
proc = [ts for ts in b.tasks.values() if ts.state == "executing"]
await b.close_gracefully()
assert b.status == Status.closed
assert b.address not in s.workers
assert mem.issubset(set(a.data))
for ts in proc:
assert ts.state in ("executing", "memory")
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[])
async def test_lifetime(c, s):
async with Worker(s.address) as a, Worker(s.address, lifetime="1 seconds") as b:
        futures = c.map(slowinc, range(200), delay=0.1, workers=[b.address])
await asyncio.sleep(1.5)
assert b.status not in (Status.running, Status.paused)
await b.finished()
assert set(b.data) == set(a.data) # successfully moved data over
@gen_cluster(worker_kwargs={"lifetime": "10s", "lifetime_stagger": "2s"})
async def test_lifetime_stagger(s, a, b):
assert a.lifetime != b.lifetime
assert 8 <= a.lifetime <= 12
assert 8 <= b.lifetime <= 12
@gen_cluster(nthreads=[])
async def test_bad_metrics(s):
def bad_metric(w):
raise Exception("Hello")
async with Worker(s.address, metrics={"bad": bad_metric}) as w:
assert "bad" not in s.workers[w.address].metrics
@gen_cluster(nthreads=[])
async def test_bad_startup(s):
def bad_startup(w):
raise Exception("Hello")
try:
await Worker(s.address, startup_information={"bad": bad_startup})
except Exception:
pytest.fail("Startup exception was raised")
@gen_cluster(client=True)
async def test_pip_install(c, s, a, b):
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen.communicate",
return_value=(b"", b""),
) as p1:
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen", return_value=p1
) as p2:
p1.communicate.return_value = b"", b""
p1.wait.return_value = 0
await c.register_worker_plugin(
PipInstall(packages=["requests"], pip_options=["--upgrade"])
)
args = p2.call_args[0][0]
assert "python" in args[0]
assert args[1:] == ["-m", "pip", "install", "--upgrade", "requests"]
@gen_cluster(client=True)
async def test_pip_install_fails(c, s, a, b):
with captured_logger(
"distributed.diagnostics.plugin", level=logging.ERROR
) as logger:
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen.communicate",
return_value=(b"", b"error"),
) as p1:
with mock.patch(
"distributed.diagnostics.plugin.subprocess.Popen", return_value=p1
) as p2:
p1.communicate.return_value = (
b"",
b"Could not find a version that satisfies the requirement not-a-package",
)
p1.wait.return_value = 1
await c.register_worker_plugin(PipInstall(packages=["not-a-package"]))
assert "not-a-package" in logger.getvalue()
# args = p2.call_args[0][0]
# assert "python" in args[0]
# assert args[1:] == ["-m", "pip", "--upgrade", "install", "requests"]
@gen_cluster(nthreads=[])
async def test_update_latency(s):
async with await Worker(s.address) as w:
original = w.latency
await w.heartbeat()
assert original != w.latency
if w.digests is not None:
assert w.digests["latency"].size() > 0
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_workerstate_executing(c, s, a):
ws = s.workers[a.address]
# Initially there are no active tasks
assert not ws.executing
# Submit a task and ensure the WorkerState is updated with the task
# it's executing
f = c.submit(slowinc, 1, delay=3)
while not ws.executing:
assert f.status == "pending"
await asyncio.sleep(0.01)
assert s.tasks[f.key] in ws.executing
await f
@pytest.mark.parametrize("reconnect", [True, False])
@gen_cluster(nthreads=[])
async def test_heartbeat_comm_closed(s, monkeypatch, reconnect):
with captured_logger("distributed.worker", level=logging.WARNING) as logger:
def bad_heartbeat_worker(*args, **kwargs):
raise CommClosedError()
async with await Worker(s.address, reconnect=reconnect) as w:
# Trigger CommClosedError during worker heartbeat
monkeypatch.setattr(w.scheduler, "heartbeat_worker", bad_heartbeat_worker)
await w.heartbeat()
if reconnect:
assert w.status == Status.running
else:
assert w.status == Status.closed
assert "Heartbeat to scheduler failed" in logger.getvalue()
@gen_cluster(nthreads=[])
async def test_bad_local_directory(s):
try:
async with Worker(s.address, local_directory="/not/a/valid-directory"):
pass
except OSError:
# On Linux: [Errno 13] Permission denied: '/not'
# On MacOSX: [Errno 30] Read-only file system: '/not'
pass
else:
assert WINDOWS
assert not any("error" in log for log in s.get_logs())
@gen_cluster(client=True, nthreads=[])
async def test_taskstate_metadata(c, s):
async with await Worker(s.address) as w:
await c.register_worker_plugin(TaskStateMetadataPlugin())
f = c.submit(inc, 1)
await f
ts = w.tasks[f.key]
assert "start_time" in ts.metadata
assert "stop_time" in ts.metadata
assert ts.metadata["stop_time"] > ts.metadata["start_time"]
# Check that Scheduler TaskState.metadata was also updated
assert s.tasks[f.key].metadata == ts.metadata
@gen_cluster(client=True, nthreads=[])
async def test_executor_offload(c, s, monkeypatch):
class SameThreadClass:
def __getstate__(self):
return ()
def __setstate__(self, state):
self._thread_ident = threading.get_ident()
return self
monkeypatch.setattr("distributed.worker.OFFLOAD_THRESHOLD", 1)
async with Worker(s.address, executor="offload") as w:
from distributed.utils import _offload_executor
assert w.executor is _offload_executor
x = SameThreadClass()
def f(x):
return threading.get_ident() == x._thread_ident
assert await c.submit(f, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_story(c, s, w):
future = c.submit(inc, 1)
await future
ts = w.tasks[future.key]
assert ts.state in str(w.story(ts))
assert w.story(ts) == w.story(ts.key)
@gen_cluster(client=True)
async def test_story_with_deps(c, s, a, b):
"""
Assert that the structure of the story does not change unintentionally and
expected subfields are actually filled
"""
dep = c.submit(inc, 1, workers=[a.address])
res = c.submit(inc, dep, workers=[b.address])
await res
key = res.key
story = a.story(key)
assert story == []
story = b.story(key)
pruned_story = []
stimulus_ids = set()
# Story now includes randomized stimulus_ids and timestamps.
for msg in story:
assert isinstance(msg, tuple), msg
assert isinstance(msg[-1], float), msg
assert msg[-1] > time() - 60, msg
pruned_msg = list(msg)
stimulus_ids.add(msg[-2])
pruned_story.append(tuple(pruned_msg[:-2]))
assert len(stimulus_ids) == 3, stimulus_ids
stimulus_id = pruned_story[0][-1]
assert isinstance(stimulus_id, str)
assert stimulus_id.startswith("compute-task")
# This is a simple transition log
expected_story = [
(key, "compute-task"),
(key, "released", "waiting", "waiting", {dep.key: "fetch"}),
(key, "waiting", "ready", "ready", {}),
(key, "ready", "executing", "executing", {}),
(key, "put-in-memory"),
(key, "executing", "memory", "memory", {}),
]
assert pruned_story == expected_story
dep_story = dep.key
story = b.story(dep_story)
pruned_story = []
stimulus_ids = set()
for msg in story:
assert isinstance(msg, tuple), msg
assert isinstance(msg[-1], float), msg
assert msg[-1] > time() - 60, msg
pruned_msg = list(msg)
stimulus_ids.add(msg[-2])
pruned_story.append(tuple(pruned_msg[:-2]))
assert len(stimulus_ids) == 2, stimulus_ids
stimulus_id = pruned_story[0][-1]
assert isinstance(stimulus_id, str)
expected_story = [
(dep_story, "ensure-task-exists", "released"),
(dep_story, "released", "fetch", "fetch", {}),
(
"gather-dependencies",
a.address,
{dep.key},
),
(dep_story, "fetch", "flight", "flight", {}),
(
"request-dep",
a.address,
{dep.key},
),
(
"receive-dep",
a.address,
{dep.key},
),
(dep_story, "put-in-memory"),
(dep_story, "flight", "memory", "memory", {res.key: "ready"}),
]
assert pruned_story == expected_story
@gen_cluster(client=True)
async def test_gather_dep_one_worker_always_busy(c, s, a, b):
    # Ensure that both dependencies for H live on a different worker than H itself.
    # The worker holding the dependencies is later blocked so that the data
    # cannot be fetched.
    # In the past it was important that there is more than one key on the
    # worker. This should be kept to avoid any edge case specific to a single key.
f = c.submit(inc, 1, workers=[a.address])
g = c.submit(
inc,
2,
workers=[a.address],
)
await f
await g
# We will block A for any outgoing communication. This simulates an
# overloaded worker which will always return "busy" for get_data requests,
# effectively blocking H indefinitely
a.outgoing_current_count = 10000000
assert f.key in a.tasks
assert g.key in a.tasks
# Ensure there are actually two distinct tasks and not some pure=True
# caching
assert f.key != g.key
h = c.submit(add, f, g, workers=[b.address])
fut = asyncio.wait_for(h, 0.1)
while h.key not in b.tasks:
await asyncio.sleep(0.01)
ts_h = b.tasks[h.key]
ts_f = b.tasks[f.key]
ts_g = b.tasks[g.key]
with pytest.raises(asyncio.TimeoutError):
assert ts_h.state == "waiting"
assert ts_f.state in ["flight", "fetch"]
assert ts_g.state in ["flight", "fetch"]
await fut
# Ensure B wasn't lazy but tried at least once
assert b.repetitively_busy
x = await Worker(s.address, name="x")
# We "scatter" the data to another worker which is able to serve this data.
# In reality this could be another worker which fetched this dependency and
# got through to A or another worker executed the task using work stealing
# or any other. To avoid cross effects, we'll just put the data onto the
# worker ourselves
x.update_data(data={key: a.data[key] for key in [f.key, g.key]})
assert await h == 5
# Since we put the data onto the worker ourselves, the gather_dep might
# still be mid execution and we'll get a dangling task. Let it finish
# naturally
while any(["Worker.gather_dep" in str(t) for t in asyncio.all_tasks()]):
await asyncio.sleep(0.05)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_worker_client_uses_default_no_close(c, s, a):
"""
If a default client is available in the process, the worker will pick this
one and will not close it if it is closed
"""
assert not Worker._initialized_clients
assert default_client() is c
existing_client = c.id
def get_worker_client_id():
def_client = get_client()
return def_client.id
worker_client = await c.submit(get_worker_client_id)
assert worker_client == existing_client
assert not Worker._initialized_clients
await a.close()
assert len(Client._instances) == 1
assert c.status == "running"
c_def = default_client()
assert c is c_def
@gen_cluster(nthreads=[("127.0.0.1", 0)])
async def test_worker_client_closes_if_created_on_worker_one_worker(s, a):
async with Client(s.address, set_as_default=False, asynchronous=True) as c:
with pytest.raises(ValueError):
default_client()
def get_worker_client_id():
def_client = get_client()
return def_client.id
new_client_id = await c.submit(get_worker_client_id)
default_client_id = await c.submit(get_worker_client_id)
assert new_client_id != c.id
assert new_client_id == default_client_id
new_client = default_client()
assert new_client_id == new_client.id
assert new_client.status == "running"
# If a worker closes, all clients created on it should close as well
await a.close()
assert new_client.status == "closed"
assert len(Client._instances) == 2
assert c.status == "running"
with pytest.raises(ValueError):
default_client()
@gen_cluster()
async def test_worker_client_closes_if_created_on_worker_last_worker_alive(s, a, b):
async with Client(s.address, set_as_default=False, asynchronous=True) as c:
with pytest.raises(ValueError):
default_client()
def get_worker_client_id():
def_client = get_client()
return def_client.id
new_client_id = await c.submit(get_worker_client_id, workers=[a.address])
default_client_id = await c.submit(get_worker_client_id, workers=[a.address])
default_client_id_b = await c.submit(get_worker_client_id, workers=[b.address])
assert not b._comms
assert new_client_id != c.id
assert new_client_id == default_client_id
assert new_client_id == default_client_id_b
new_client = default_client()
assert new_client_id == new_client.id
assert new_client.status == "running"
# We'll close A. This should *not* close the client since the client is also used by B
await a.close()
assert new_client.status == "running"
client_id_b_after = await c.submit(get_worker_client_id, workers=[b.address])
assert client_id_b_after == default_client_id_b
assert len(Client._instances) == 2
await b.close()
assert new_client.status == "closed"
assert c.status == "running"
with pytest.raises(ValueError):
default_client()
@gen_cluster(client=True, nthreads=[])
async def test_multiple_executors(c, s):
def get_thread_name():
return threading.current_thread().name
async with Worker(
s.address,
nthreads=2,
executor={"foo": ThreadPoolExecutor(1, thread_name_prefix="Dask-Foo-Threads")},
):
futures = []
with dask.annotate(executor="default"):
futures.append(c.submit(get_thread_name, pure=False))
with dask.annotate(executor="foo"):
futures.append(c.submit(get_thread_name, pure=False))
default_result, gpu_result = await c.gather(futures)
assert "Dask-Default-Threads" in default_result
assert "Dask-Foo-Threads" in gpu_result
@gen_cluster(client=True)
async def test_process_executor(c, s, a, b):
with ProcessPoolExecutor() as e:
a.executors["processes"] = e
b.executors["processes"] = e
future = c.submit(os.getpid, pure=False)
assert (await future) == os.getpid()
with dask.annotate(executor="processes"):
future = c.submit(os.getpid, pure=False)
assert (await future) != os.getpid()
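# Helper for test_process_executor_kills_process below: it terminates the process
# that executes the task so that the ProcessPoolExecutor becomes unusable.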
def kill_process():
import os
import signal
if WINDOWS:
# There's no SIGKILL on Windows
sig = signal.SIGTERM
else:
# With SIGTERM there may be several seconds worth of delay before the worker
# actually shuts down - particularly on slow CI. Use SIGKILL for instant
# termination.
sig = signal.SIGKILL
os.kill(os.getpid(), sig)
sleep(60) # Cope with non-instantaneous termination
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_process_executor_kills_process(c, s, a):
with ProcessPoolExecutor() as e:
a.executors["processes"] = e
with dask.annotate(executor="processes", retries=1):
future = c.submit(kill_process)
msg = "A child process terminated abruptly, the process pool is not usable anymore"
with pytest.raises(BrokenProcessPool, match=msg):
await future
with dask.annotate(executor="processes", retries=1):
future = c.submit(inc, 1)
# The process pool is now unusable and the worker is effectively dead
with pytest.raises(BrokenProcessPool, match=msg):
await future
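# Helper used by test_process_executor_raise_exception below; it simply raises so
# the test can check that exceptions propagate out of the "processes" executor.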
def raise_exc():
raise RuntimeError("foo")
@gen_cluster(client=True)
async def test_process_executor_raise_exception(c, s, a, b):
with ProcessPoolExecutor() as e:
a.executors["processes"] = e
b.executors["processes"] = e
with dask.annotate(executor="processes", retries=1):
future = c.submit(raise_exc)
with pytest.raises(RuntimeError, match="foo"):
await future
@pytest.mark.gpu
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gpu_executor(c, s, w):
if nvml.device_get_count() > 0:
e = w.executors["gpu"]
assert isinstance(e, distributed.threadpoolexecutor.ThreadPoolExecutor)
assert e._max_workers == 1
else:
assert "gpu" not in w.executors
def assert_task_states_on_worker(expected, worker):
for dep_key, expected_state in expected.items():
assert dep_key in worker.tasks, (worker.name, dep_key, worker.tasks)
dep_ts = worker.tasks[dep_key]
assert dep_ts.state == expected_state, (worker.name, dep_ts, expected_state)
assert set(expected) == set(worker.tasks)
@gen_cluster(client=True)
async def test_worker_state_error_release_error_last(c, s, a, b):
"""
Create a chain of tasks and err one of them. Then release tasks in a certain
order and ensure the tasks are released and/or kept in memory as appropriate
F -- RES (error)
/
/
G
Free error last
"""
def raise_exc(*args):
raise RuntimeError()
f = c.submit(inc, 1, workers=[a.address], key="f")
g = c.submit(inc, 1, workers=[b.address], key="g")
res = c.submit(raise_exc, f, g, workers=[a.address])
with pytest.raises(RuntimeError):
await res
# Nothing bad happened on B, therefore B should hold on to G
assert len(b.tasks) == 1
assert g.key in b.tasks
# A raised the exception therefore we should hold on to the erroneous task
assert res.key in a.tasks
ts = a.tasks[res.key]
assert ts.state == "error"
expected_states = {
# A was instructed to compute this result and we're still holding a ref via `f`
f.key: "memory",
# This was fetched from another worker. While we hold a ref via `g`, the
# scheduler only instructed to compute this on B
g.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
# Expected states after we release references to the futures
f.release()
g.release()
    # We no longer hold any refs to f or g and B didn't have any errors. It
# releases everything as expected
while b.tasks:
await asyncio.sleep(0.01)
expected_states = {
f.key: "released",
g.key: "released",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
res.release()
    # We no longer hold any refs. Cluster should reset completely
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_state_error_release_error_first(c, s, a, b):
"""
Create a chain of tasks and err one of them. Then release tasks in a certain
order and ensure the tasks are released and/or kept in memory as appropriate
F -- RES (error)
/
/
G
Free error first
"""
def raise_exc(*args):
raise RuntimeError()
f = c.submit(inc, 1, workers=[a.address], key="f")
g = c.submit(inc, 1, workers=[b.address], key="g")
res = c.submit(raise_exc, f, g, workers=[a.address])
with pytest.raises(RuntimeError):
await res
# Nothing bad happened on B, therefore B should hold on to G
assert len(b.tasks) == 1
assert g.key in b.tasks
# A raised the exception therefore we should hold on to the erroneous task
assert res.key in a.tasks
ts = a.tasks[res.key]
assert ts.state == "error"
expected_states = {
# A was instructed to compute this result and we're still holding a ref
# via `f`
f.key: "memory",
# This was fetched from another worker. While we hold a ref via `g`, the
# scheduler only instructed to compute this on B
g.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
# Expected states after we release references to the futures
res.release()
    # We no longer hold a ref to res, so A forgets the erroneous task while
    # f and g, which we still reference, stay in memory
while res.key in a.tasks:
await asyncio.sleep(0.01)
expected_states = {
f.key: "memory",
g.key: "memory",
}
assert_task_states_on_worker(expected_states, a)
f.release()
g.release()
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_state_error_release_error_int(c, s, a, b):
"""
Create a chain of tasks and err one of them. Then release tasks in a certain
order and ensure the tasks are released and/or kept in memory as appropriate
F -- RES (error)
/
/
G
Free one successful task, then error, then last task
"""
def raise_exc(*args):
raise RuntimeError()
f = c.submit(inc, 1, workers=[a.address], key="f")
g = c.submit(inc, 1, workers=[b.address], key="g")
res = c.submit(raise_exc, f, g, workers=[a.address])
with pytest.raises(RuntimeError):
await res
# Nothing bad happened on B, therefore B should hold on to G
assert len(b.tasks) == 1
assert g.key in b.tasks
# A raised the exception therefore we should hold on to the erroneous task
assert res.key in a.tasks
ts = a.tasks[res.key]
assert ts.state == "error"
expected_states = {
# A was instructed to compute this result and we're still holding a ref via `f`
f.key: "memory",
# This was fetched from another worker. While we hold a ref via `g`, the
# scheduler only instructed to compute this on B
g.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states, a)
# Expected states after we release references to the futures
f.release()
res.release()
    # We released f and res, so A drops them and only keeps g, which we still
    # reference, in memory
while len(a.tasks) > 1:
await asyncio.sleep(0.01)
expected_states = {
g.key: "memory",
}
assert_task_states_on_worker(expected_states, a)
assert_task_states_on_worker(expected_states, b)
g.release()
# We no longer hold any refs. Cluster should reset completely
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_state_error_long_chain(c, s, a, b):
def raise_exc(*args):
raise RuntimeError()
# f (A) --------> res (B)
# /
# g (B) -> h (A)
f = c.submit(inc, 1, workers=[a.address], key="f", allow_other_workers=False)
g = c.submit(inc, 1, workers=[b.address], key="g", allow_other_workers=False)
h = c.submit(inc, g, workers=[a.address], key="h", allow_other_workers=False)
res = c.submit(
raise_exc, f, h, workers=[b.address], allow_other_workers=False, key="res"
)
with pytest.raises(RuntimeError):
await res
expected_states_A = {
f.key: "memory",
g.key: "memory",
h.key: "memory",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_A, a)
expected_states_B = {
f.key: "memory",
g.key: "memory",
h.key: "memory",
res.key: "error",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_B, b)
f.release()
expected_states_A = {
g.key: "memory",
h.key: "memory",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_A, a)
expected_states_B = {
f.key: "released",
g.key: "memory",
h.key: "memory",
res.key: "error",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_B, b)
g.release()
expected_states_A = {
g.key: "released",
h.key: "memory",
}
await asyncio.sleep(0.05)
assert_task_states_on_worker(expected_states_A, a)
    # B must not forget any of these tasks since each one still has a valid dependent
expected_states_B = {
f.key: "released",
h.key: "memory",
res.key: "error",
}
assert_task_states_on_worker(expected_states_B, b)
h.release()
await asyncio.sleep(0.05)
expected_states_A = {}
assert_task_states_on_worker(expected_states_A, a)
expected_states_B = {
f.key: "released",
h.key: "released",
res.key: "error",
}
assert_task_states_on_worker(expected_states_B, b)
res.release()
# We no longer hold any refs. Cluster should reset completely
for server in [s, a, b]:
while server.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", x) for x in range(4)])
async def test_hold_on_to_replicas(c, s, *workers):
f1 = c.submit(inc, 1, workers=[workers[0].address], key="f1")
f2 = c.submit(inc, 2, workers=[workers[1].address], key="f2")
sum_1 = c.submit(
slowsum, [f1, f2], delay=0.1, workers=[workers[2].address], key="sum"
)
sum_2 = c.submit(
slowsum, [f1, sum_1], delay=0.2, workers=[workers[3].address], key="sum_2"
)
f1.release()
f2.release()
while sum_2.key not in workers[3].tasks:
await asyncio.sleep(0.01)
while not workers[3].tasks[sum_2.key].state == "memory":
assert len(s.tasks[f1.key].who_has) >= 2
assert s.tasks[f2.key].state == "released"
await asyncio.sleep(0.01)
while len(workers[2].data) > 1:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_worker_reconnects_mid_compute(c, s, a, b):
"""Ensure that, if a worker disconnects while computing a result, the scheduler will
still accept the result.
There is also an edge case tested which ensures that the reconnect is
successful if a task is currently executing; see
https://github.com/dask/distributed/issues/5078
See also distributed.tests.test_scheduler.py::test_gather_allow_worker_reconnect
"""
with captured_logger("distributed.scheduler") as s_logs:
# Let's put one task in memory to ensure the reconnect has tasks in
# different states
f1 = c.submit(inc, 1, workers=[a.address], allow_other_workers=True)
await f1
a_address = a.address
a.periodic_callbacks["heartbeat"].stop()
await a.heartbeat()
a.heartbeat_active = True
from distributed import Lock
def fast_on_a(lock):
w = get_worker()
import time
if w.address != a_address:
lock.acquire()
else:
time.sleep(1)
lock = Lock()
# We want to be sure that A is the only one computing this result
async with lock:
f2 = c.submit(
fast_on_a, lock, workers=[a.address], allow_other_workers=True
)
while f2.key not in a.tasks:
await asyncio.sleep(0.01)
await s.stream_comms[a.address].close()
assert len(s.workers) == 1
a.heartbeat_active = False
await a.heartbeat()
assert len(s.workers) == 2
# Since B is locked, this is ensured to originate from A
await f2
assert "Unexpected worker completed task" in s_logs.getvalue()
# Ensure that all in-memory tasks on A have been restored on the
# scheduler after reconnect
for ts in a.tasks.values():
if ts.state == "memory":
assert a.address in {ws.address for ws in s.tasks[ts.key].who_has}
# Ensure that all keys have been properly registered and will also be
# cleaned up nicely.
del f1, f2
while any(w.tasks for w in [a, b]):
await asyncio.sleep(0.001)
@gen_cluster(client=True)
async def test_worker_reconnects_mid_compute_multiple_states_on_scheduler(c, s, a, b):
"""
Ensure that a reconnecting worker does not break the scheduler regardless of
what state the keys of the worker are in when it connects back
See also test_worker_reconnects_mid_compute which uses a smaller chain of
tasks and does not release f1 in between
"""
with captured_logger("distributed.scheduler") as s_logs:
# Let's put one task in memory to ensure the reconnect has tasks in
# different states
f1 = c.submit(inc, 1, workers=[a.address], allow_other_workers=True)
f2 = c.submit(inc, f1, workers=[a.address], allow_other_workers=True)
a_address = a.address
a.periodic_callbacks["heartbeat"].stop()
await a.heartbeat()
a.heartbeat_active = True
from distributed import Lock
def fast_on_a(lock):
w = get_worker()
import time
if w.address != a_address:
lock.acquire()
else:
time.sleep(1)
lock = Lock()
# We want to be sure that A is the only one computing this result
async with lock:
f3 = c.submit(
fast_on_a, lock, workers=[a.address], allow_other_workers=True
)
while f3.key not in a.tasks:
await asyncio.sleep(0.01)
await s.stream_comms[a.address].close()
f1.release()
assert len(s.workers) == 1
while s.tasks[f1.key].state != "released":
await asyncio.sleep(0)
a.heartbeat_active = False
await a.heartbeat()
assert len(s.workers) == 2
# Since B is locked, this is ensured to originate from A
await f3
assert "Unexpected worker completed task" in s_logs.getvalue()
# Ensure that all in-memory tasks on A have been restored on the
# scheduler after reconnect
for ts in a.tasks.values():
if ts.state == "memory":
assert a.address in {ws.address for ws in s.tasks[ts.key].who_has}
del f1, f2, f3
while any(w.tasks for w in [a, b]):
await asyncio.sleep(0.001)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_forget_dependents_after_release(c, s, a):
fut = c.submit(inc, 1, key="f-1")
fut2 = c.submit(inc, fut, key="f-2")
await asyncio.wait([fut, fut2])
assert fut.key in a.tasks
assert fut2.key in a.tasks
assert fut2.key in {d.key for d in a.tasks[fut.key].dependents}
fut2.release()
while fut2.key in a.tasks:
await asyncio.sleep(0.001)
assert fut2.key not in {d.key for d in a.tasks[fut.key].dependents}
@gen_cluster(client=True)
async def test_steal_during_task_deserialization(c, s, a, b, monkeypatch):
stealing_ext = s.extensions["stealing"]
stealing_ext._pc.stop()
from distributed.utils import ThreadPoolExecutor
class CountingThreadPool(ThreadPoolExecutor):
counter = 0
def submit(self, *args, **kwargs):
CountingThreadPool.counter += 1
return super().submit(*args, **kwargs)
# Ensure we're always offloading
monkeypatch.setattr("distributed.worker.OFFLOAD_THRESHOLD", 1)
threadpool = CountingThreadPool(
max_workers=1, thread_name_prefix="Counting-Offload-Threadpool"
)
try:
monkeypatch.setattr("distributed.utils._offload_executor", threadpool)
class SlowDeserializeCallable:
def __init__(self, delay=0.1):
self.delay = delay
def __getstate__(self):
return self.delay
            def __setstate__(self, state):
                # The return value of __setstate__ is ignored, so restore the
                # state on self directly
                self.delay = state
                import time
                time.sleep(self.delay)
def __call__(self, *args, **kwargs):
return 41
slow_deserialized_func = SlowDeserializeCallable()
fut = c.submit(
slow_deserialized_func, 1, workers=[a.address], allow_other_workers=True
)
while CountingThreadPool.counter == 0:
await asyncio.sleep(0)
ts = s.tasks[fut.key]
a.handle_steal_request(fut.key, stimulus_id="test")
stealing_ext.scheduler.send_task_to_worker(b.address, ts)
fut2 = c.submit(inc, fut, workers=[a.address])
fut3 = c.submit(inc, fut2, workers=[a.address])
assert await fut2 == 42
await fut3
finally:
threadpool.shutdown()
@gen_cluster(client=True)
async def test_gather_dep_exception_one_task(c, s, a, b):
"""Ensure an exception in a single task does not tear down an entire batch of gather_dep
See also https://github.com/dask/distributed/issues/5152
See also test_gather_dep_exception_one_task_2
"""
fut = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, 2, workers=[a.address], key="f2")
fut3 = c.submit(inc, 3, workers=[a.address], key="f3")
import asyncio
event = asyncio.Event()
write_queue = asyncio.Queue()
event.clear()
b.rpc = _LockedCommPool(b.rpc, write_event=event, write_queue=write_queue)
b.rpc.remove(a.address)
def sink(a, b, *args):
return a + b
res1 = c.submit(sink, fut, fut2, fut3, workers=[b.address])
res2 = c.submit(sink, fut, fut2, workers=[b.address])
# Wait until we're sure the worker is attempting to fetch the data
while True:
peer_addr, msg = await write_queue.get()
if peer_addr == a.address and msg["op"] == "get_data":
break
    # Provoke an "impossible transition" exception.
# By choosing a state which doesn't exist we're not running into validation
# errors and the state machine should raise if we want to transition from
# fetch to memory
b.validate = False
b.tasks[fut3.key].state = "fetch"
event.set()
assert await res1 == 5
assert await res2 == 5
del res1, res2, fut, fut2
fut3.release()
while a.tasks and b.tasks:
await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_gather_dep_exception_one_task_2(c, s, a, b):
"""Ensure an exception in a single task does not tear down an entire batch of gather_dep
The below triggers an fetch->memory transition
See also https://github.com/dask/distributed/issues/5152
See also test_gather_dep_exception_one_task
"""
# This test does not trigger the condition reliably but is a very easy case
    # which should function correctly regardless
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, fut1, workers=[b.address], key="f2")
while fut1.key not in b.tasks or b.tasks[fut1.key].state == "flight":
await asyncio.sleep(0)
s.handle_missing_data(key="f1", errant_worker=a.address)
await fut2
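# The two helpers below bypass the client API and send raw "acquire-replicas" /
# "remove-replicas" messages over the scheduler's stream comm to a single worker,
# mimicking the instructions the scheduler itself sends when managing replicas.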
def _acquire_replicas(scheduler, worker, *futures):
keys = [f.key for f in futures]
scheduler.stream_comms[worker.address].send(
{
"op": "acquire-replicas",
"keys": keys,
"stimulus_id": f"acquire-replicas-{time()}",
"priorities": {key: scheduler.tasks[key].priority for key in keys},
"who_has": {
key: {w.address for w in scheduler.tasks[key].who_has} for key in keys
},
},
)
def _remove_replicas(scheduler, worker, *futures):
keys = [f.key for f in futures]
ws = scheduler.workers[worker.address]
for k in keys:
ts = scheduler.tasks[k]
if ws in ts.who_has:
scheduler.remove_replica(ts, ws)
scheduler.stream_comms[ws.address].send(
{
"op": "remove-replicas",
"keys": keys,
"stimulus_id": f"remove-replicas-{time()}",
}
)
@gen_cluster(client=True)
async def test_acquire_replicas(c, s, a, b):
fut = c.submit(inc, 1, workers=[a.address])
await fut
_acquire_replicas(s, b, fut)
while len(s.who_has[fut.key]) != 2:
await asyncio.sleep(0.005)
for w in (a, b):
assert w.data[fut.key] == 2
assert w.tasks[fut.key].state == "memory"
fut.release()
while b.tasks or a.tasks:
await asyncio.sleep(0.005)
@gen_cluster(client=True)
async def test_acquire_replicas_same_channel(c, s, a, b):
fut = c.submit(inc, 1, workers=[a.address], key="f-replica")
futB = c.submit(inc, 2, workers=[a.address], key="f-B")
futC = c.submit(inc, futB, workers=[b.address], key="f-C")
await fut
_acquire_replicas(s, b, fut)
await futC
while fut.key not in b.tasks or not b.tasks[fut.key].state == "memory":
await asyncio.sleep(0.005)
while len(s.who_has[fut.key]) != 2:
await asyncio.sleep(0.005)
# Ensure that both the replica and an ordinary dependency pass through the
# same communication channel
for f in [fut, futB]:
assert any("request-dep" in msg for msg in b.story(f.key))
assert any("gather-dependencies" in msg for msg in b.story(f.key))
assert any(f.key in msg["keys"] for msg in b.incoming_transfer_log)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_acquire_replicas_many(c, s, *workers):
futs = c.map(inc, range(10), workers=[workers[0].address])
res = c.submit(sum, futs, workers=[workers[1].address])
final = c.submit(slowinc, res, delay=0.5, workers=[workers[1].address])
await wait(futs)
_acquire_replicas(s, workers[2], *futs)
# Worker 2 should normally not even be involved if there was no replication
while not all(
f.key in workers[2].tasks and workers[2].tasks[f.key].state == "memory"
for f in futs
):
await asyncio.sleep(0.01)
assert all(ts.state == "memory" for ts in workers[2].tasks.values())
assert await final == sum(map(inc, range(10))) + 1
# All workers have a replica
assert all(len(s.tasks[f.key].who_has) == 3 for f in futs)
del futs, res, final
while any(w.tasks for w in workers):
await asyncio.sleep(0.001)
@gen_cluster(client=True)
async def test_remove_replica_simple(c, s, a, b):
futs = c.map(inc, range(10), workers=[a.address])
await wait(futs)
_acquire_replicas(s, b, *futs)
while not all(len(s.tasks[f.key].who_has) == 2 for f in futs):
await asyncio.sleep(0.01)
_remove_replicas(s, b, *futs)
assert all(len(s.tasks[f.key].who_has) == 1 for f in futs)
while b.tasks:
await asyncio.sleep(0.01)
# Ensure there is no delayed reply to re-register the key
await asyncio.sleep(0.01)
assert all(s.tasks[f.key].who_has == {s.workers[a.address]} for f in futs)
@gen_cluster(
client=True,
config={"distributed.comm.recent-messages-log-length": 1_000},
)
async def test_remove_replica_while_computing(c, s, *workers):
futs = c.map(inc, range(10), workers=[workers[0].address])
# All interesting things will happen on that worker
w = workers[1]
intermediate = c.map(slowinc, futs, delay=0.05, workers=[w.address])
def reduce(*args, **kwargs):
import time
time.sleep(0.5)
return
final = c.submit(reduce, intermediate, workers=[w.address], key="final")
while not any(f.key in w.tasks for f in intermediate):
await asyncio.sleep(0.001)
# The scheduler removes keys from who_has/has_what immediately
# Make sure the worker responds to the rejection and the scheduler corrects
# the state
ws = s.workers[w.address]
while not any(s.tasks[fut.key] in ws.has_what for fut in futs):
await asyncio.sleep(0.001)
_remove_replicas(s, w, *futs)
# Scheduler removed keys immediately...
assert not any(s.tasks[fut.key] in ws.has_what for fut in futs)
# ... but the state is properly restored
while not any(s.tasks[fut.key] in ws.has_what for fut in futs):
await asyncio.sleep(0.01)
# The worker should reject all of these since they are required
while not all(fut.done() for fut in intermediate):
_remove_replicas(s, w, *futs)
await asyncio.sleep(0.01)
await wait(intermediate)
# If a request is rejected, the worker responds with an add-keys message to
    # re-enlist the key in the scheduler's state system to avoid race conditions,
# see also https://github.com/dask/distributed/issues/5265
rejections = set()
for msg in w.log:
if msg[0] == "remove-replica-rejected":
rejections.update(msg[1])
for rejected_key in rejections:
def answer_sent(key):
for batch in w.batched_stream.recent_message_log:
for msg in batch:
if "op" in msg and msg["op"] == "add-keys" and key in msg["keys"]:
return True
return False
assert answer_sent(rejected_key)
# Since intermediate is done, futs replicas may be removed.
# They might be already gone due to the above remove replica calls
_remove_replicas(s, w, *futs)
while any(w.tasks[f.key].state != "released" for f in futs if f.key in w.tasks):
await asyncio.sleep(0.001)
# The scheduler actually gets notified about the removed replica
while not all(len(s.tasks[f.key].who_has) == 1 for f in futs):
await asyncio.sleep(0.001)
await final
del final, intermediate, futs
while any(w.tasks for w in workers):
await asyncio.sleep(0.001)
@gen_cluster(client=True, nthreads=[("", 1)] * 3)
async def test_who_has_consistent_remove_replica(c, s, *workers):
a = workers[0]
other_workers = {w for w in workers if w != a}
f1 = c.submit(inc, 1, key="f1", workers=[w.address for w in other_workers])
await wait(f1)
for w in other_workers:
_acquire_replicas(s, w, f1)
while not len(s.tasks[f1.key].who_has) == len(other_workers):
await asyncio.sleep(0)
f2 = c.submit(inc, f1, workers=[a.address])
# Wait just until the moment the worker received the task and scheduled the
# task to be fetched, then remove the replica from the worker this one is
# trying to get the data from. Ensure this is handled gracefully and no
    # suspicious counters are incremented, since this is expected behaviour when
    # removing replicas
while f1.key not in a.tasks or a.tasks[f1.key].state != "flight":
await asyncio.sleep(0)
coming_from = None
for w in other_workers:
coming_from = w
if w.address == a.tasks[f1.key].coming_from:
break
coming_from.handle_remove_replicas([f1.key], "test")
await f2
assert (f1.key, "missing-dep") in a.story(f1.key)
assert a.tasks[f1.key].suspicious_count == 0
assert s.tasks[f1.key].suspicious == 0
@gen_cluster(client=True)
async def test_missing_released_zombie_tasks(c, s, a, b):
"""
Ensure that no fetch/flight tasks are left in the task dict of a
worker after everything was released
"""
a.total_in_connections = 0
f1 = c.submit(inc, 1, key="f1", workers=[a.address])
f2 = c.submit(inc, f1, key="f2", workers=[b.address])
key = f1.key
while key not in b.tasks or b.tasks[key].state != "fetch":
await asyncio.sleep(0.01)
await a.close(report=False)
del f1, f2
while b.tasks:
await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_missing_released_zombie_tasks_2(c, s, a, b):
a.total_in_connections = 0
f1 = c.submit(inc, 1, key="f1", workers=[a.address])
f2 = c.submit(inc, f1, key="f2", workers=[b.address])
while f1.key not in b.tasks:
await asyncio.sleep(0)
ts = b.tasks[f1.key]
assert ts.state == "fetch"
# A few things can happen to clear who_has. The dominant process is upon
# connection failure to a worker. Regardless of how the set was cleared, the
# task will be transitioned to missing where the worker is trying to
    # reacquire this information from the scheduler. While this is happening on
# worker side, the tasks are released and we want to ensure that no dangling
# zombie tasks are left on the worker
ts.who_has.clear()
del f1, f2
while b.tasks:
await asyncio.sleep(0.01)
story = b.story(ts)
assert any("missing" in msg for msg in story)
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
nthreads=[("", 1)],
config={"distributed.worker.memory.pause": 0.5},
worker_kwargs={"memory_limit": 2 ** 29}, # 500 MiB
)
async def test_worker_status_sync(c, s, a):
(ws,) = s.workers.values()
while ws.status != Status.running:
await asyncio.sleep(0.01)
def leak():
distributed._test_leak = "x" * 2 ** 28 # 250 MiB
def clear_leak():
del distributed._test_leak
await c.run(leak)
while ws.status != Status.paused:
await asyncio.sleep(0.01)
await c.run(clear_leak)
while ws.status != Status.running:
await asyncio.sleep(0.01)
await s.retire_workers()
while ws.status != Status.closed:
await asyncio.sleep(0.01)
events = [ev for _, ev in s.events[ws.address] if ev["action"] != "heartbeat"]
assert events == [
{"action": "add-worker"},
{
"action": "worker-status-change",
"prev-status": "undefined",
"status": "running",
},
{
"action": "worker-status-change",
"prev-status": "running",
"status": "paused",
},
{
"action": "worker-status-change",
"prev-status": "paused",
"status": "running",
},
{"action": "remove-worker", "processing-tasks": {}},
{"action": "retired"},
]
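# Polling helper: block until ``key`` on ``worker`` reaches the given ``state``.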
async def _wait_for_state(key: str, worker: Worker, state: str):
    # Keep the sleep interval at 0 since the tests using this are very sensitive
    # about timing. They intend to capture loop cycles after this specific
    # condition was set.
while key not in worker.tasks or worker.tasks[key].state != state:
await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_gather_dep_cancelled_rescheduled(c, s, a, b):
"""At time of writing, the gather_dep implementation filtered tasks again
for in-flight state. The response parser, however, did not distinguish
resulting in unwanted missing-data signals to the scheduler, causing
potential rescheduling or data leaks.
If a cancelled key is rescheduled for fetching while gather_dep waits
internally for get_data, the response parser would misclassify this key and
cause the key to be recommended for a release causing deadlocks and/or lost
keys.
At time of writing, this transition was implemented wrongly and caused a
flight->cancelled transition which should be recoverable but the cancelled
state was corrupted by this transition since ts.done==True. This attribute
setting would cause a cancelled->fetch transition to actually drop the key
instead, causing https://github.com/dask/distributed/issues/5366
See also test_gather_dep_do_not_handle_response_of_not_requested_tasks
"""
import distributed
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, fut1, workers=[a.address], key="f2")
await fut2
fut4 = c.submit(sum, fut1, fut2, workers=[b.address], key="f4")
fut3 = c.submit(inc, fut1, workers=[b.address], key="f3")
fut2_key = fut2.key
await _wait_for_state(fut2_key, b, "flight")
while not mocked_gather.call_args:
await asyncio.sleep(0)
fut4.release()
while fut4.key in b.tasks:
await asyncio.sleep(0)
assert b.tasks[fut2.key].state == "cancelled"
args, kwargs = mocked_gather.call_args
assert fut2.key in kwargs["to_gather"]
# The below synchronization and mock structure allows us to intercept the
# state after gather_dep has been scheduled and is waiting for the
# get_data_from_worker to finish. If state transitions happen during this
# time, the response parser needs to handle this properly
lock = asyncio.Lock()
event = asyncio.Event()
async with lock:
async def wait_get_data(*args, **kwargs):
event.set()
async with lock:
return await distributed.worker.get_data_from_worker(*args, **kwargs)
with mock.patch.object(
distributed.worker,
"get_data_from_worker",
side_effect=wait_get_data,
):
gather_dep_fut = asyncio.ensure_future(
Worker.gather_dep(b, *args, **kwargs)
)
await event.wait()
fut4 = c.submit(sum, [fut1, fut2], workers=[b.address], key="f4")
while b.tasks[fut2.key].state != "flight":
await asyncio.sleep(0.1)
await gather_dep_fut
f2_story = b.story(fut2.key)
assert f2_story
await fut3
await fut4
@gen_cluster(client=True)
async def test_gather_dep_do_not_handle_response_of_not_requested_tasks(c, s, a, b):
"""At time of writing, the gather_dep implementation filtered tasks again
    for in-flight state. The response parser, however, did not distinguish,
resulting in unwanted missing-data signals to the scheduler, causing
potential rescheduling or data leaks.
This test may become obsolete if the implementation changes significantly.
"""
import distributed
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(inc, fut1, workers=[a.address], key="f2")
await fut2
fut4 = c.submit(sum, fut1, fut2, workers=[b.address], key="f4")
fut3 = c.submit(inc, fut1, workers=[b.address], key="f3")
fut2_key = fut2.key
await _wait_for_state(fut2_key, b, "flight")
while not mocked_gather.call_args:
await asyncio.sleep(0)
fut4.release()
while fut4.key in b.tasks:
await asyncio.sleep(0)
assert b.tasks[fut2.key].state == "cancelled"
args, kwargs = mocked_gather.call_args
assert fut2.key in kwargs["to_gather"]
await Worker.gather_dep(b, *args, **kwargs)
assert fut2.key not in b.tasks
f2_story = b.story(fut2.key)
assert f2_story
    assert not any("missing-dep" in msg for msg in f2_story)
await fut3
@gen_cluster(
client=True,
config={
"distributed.comm.recent-messages-log-length": 1000,
},
)
async def test_gather_dep_no_longer_in_flight_tasks(c, s, a, b):
import distributed
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut1 = c.submit(inc, 1, workers=[a.address], key="f1")
fut2 = c.submit(sum, fut1, fut1, workers=[b.address], key="f2")
fut1_key = fut1.key
await _wait_for_state(fut1_key, b, "flight")
while not mocked_gather.call_args:
await asyncio.sleep(0)
fut2.release()
while fut2.key in b.tasks:
await asyncio.sleep(0)
assert b.tasks[fut1.key].state == "cancelled"
args, kwargs = mocked_gather.call_args
await Worker.gather_dep(b, *args, **kwargs)
assert fut2.key not in b.tasks
f1_story = b.story(fut1.key)
assert f1_story
    assert not any("missing-dep" in msg for msg in f1_story)
@pytest.mark.parametrize("intermediate_state", ["resumed", "cancelled"])
@pytest.mark.parametrize("close_worker", [False, True])
@gen_cluster(client=True, nthreads=[("", 1)] * 3)
async def test_deadlock_cancelled_after_inflight_before_gather_from_worker(
c, s, a, b, x, intermediate_state, close_worker
):
"""If a task was transitioned to in-flight, the gather-dep coroutine was
scheduled but a cancel request came in before gather_data_from_worker was
issued this might corrupt the state machine if the cancelled key is not
properly handled"""
fut1 = c.submit(slowinc, 1, workers=[a.address], key="f1")
fut1B = c.submit(slowinc, 2, workers=[x.address], key="f1B")
fut2 = c.submit(sum, [fut1, fut1B], workers=[x.address], key="f2")
await fut2
with mock.patch.object(distributed.worker.Worker, "gather_dep") as mocked_gather:
fut3 = c.submit(inc, fut2, workers=[b.address], key="f3")
fut2_key = fut2.key
await _wait_for_state(fut2_key, b, "flight")
s.set_restrictions(worker={fut1B.key: a.address, fut2.key: b.address})
while not mocked_gather.call_args:
await asyncio.sleep(0)
await s.remove_worker(address=x.address, safe=True, close=close_worker)
await _wait_for_state(fut2_key, b, intermediate_state)
args, kwargs = mocked_gather.call_args
await Worker.gather_dep(b, *args, **kwargs)
await fut3
|
py
|
1a5a14d3fbf5c52b5a196008697155efc5796235
|
# Copyright 2018 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from c7n_azure import constants
from c7n_azure.actions.logic_app import LogicAppAction
from azure.mgmt.resourcegraph.models import QueryRequest
from c7n_azure.actions.notify import Notify
from c7n_azure.filters import ParentFilter
from c7n_azure.provider import resources
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.query import sources, MaxResourceLimit
from c7n.utils import local_session
log = logging.getLogger('custodian.azure.query')
class ResourceQuery:
def __init__(self, session_factory):
self.session_factory = session_factory
def filter(self, resource_manager, **params):
m = resource_manager.resource_type
enum_op, list_op, extra_args = m.enum_spec
if extra_args:
params.update(extra_args)
params.update(m.extra_args(resource_manager))
try:
op = getattr(getattr(resource_manager.get_client(), enum_op), list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
except Exception as e:
log.error("Failed to query resource.\n"
"Type: azure.{0}.\n"
"Error: {1}".format(resource_manager.type, e))
raise
        raise TypeError("Enumerating resources resulted in a return "
                        "value which could not be iterated.")
@staticmethod
def resolve(resource_type):
if not isinstance(resource_type, type):
raise ValueError(resource_type)
else:
m = resource_type
return m
@sources.register('describe-azure')
class DescribeSource:
resource_query_factory = ResourceQuery
def __init__(self, manager):
self.manager = manager
self.query = self.resource_query_factory(self.manager.session_factory)
def validate(self):
pass
def get_resources(self, query):
return self.query.filter(self.manager)
def get_permissions(self):
return ()
def augment(self, resources):
return resources
@sources.register('resource-graph')
class ResourceGraphSource:
def __init__(self, manager):
self.manager = manager
def validate(self):
if not hasattr(self.manager.resource_type, 'resource_type'):
raise PolicyValidationError(
"%s is not supported with the Azure Resource Graph source."
% self.manager.data['resource'])
def get_resources(self, _):
log.warning('The Azure Resource Graph source '
'should not be used in production scenarios at this time.')
session = self.manager.get_session()
client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')
        # empty scope will return all resources
query_scope = ""
if self.manager.resource_type.resource_type != 'armresource':
query_scope = "where type =~ '%s'" % self.manager.resource_type.resource_type
query = QueryRequest(
query=query_scope,
subscriptions=[session.get_subscription_id()]
)
res = client.resources(query)
cols = [c['name'] for c in res.data['columns']]
data = [dict(zip(cols, r)) for r in res.data['rows']]
return data
def get_permissions(self):
return ()
def augment(self, resources):
return resources
class ChildResourceQuery(ResourceQuery):
"""A resource query for resources that must be queried with parent information.
Several resource types can only be queried in the context of their
    parents' identifiers, e.g. SQL and Cosmos databases.
"""
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type) # type: ChildTypeInfo
parents = resource_manager.get_parent_manager()
# Have to query separately for each parent's children.
results = []
for parent in parents.resources():
try:
subset = resource_manager.enumerate_resources(parent, m, **params)
if subset:
# If required, append parent resource ID to all child resources
if m.annotate_parent:
for r in subset:
r[m.parent_key] = parent[parents.resource_type.id]
results.extend(subset)
except Exception as e:
log.warning('Child enumeration failed for {0}. {1}'
.format(parent[parents.resource_type.id], e))
if m.raise_on_exception:
raise e
return results
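# Illustrative sketch only (not part of c7n_azure): conceptually, a child query
# expands into one enumeration call per parent resource. The resource names and
# operations below are hypothetical placeholders, not guaranteed SDK calls.
#
#     for server in parent_manager.resources():             # e.g. SQL servers
#         dbs = client.databases.list_by_server(            # enum_spec -> (client attr, op)
#             resource_group_name=server['resourceGroup'],
#             server_name=server['name'])
#         for db in dbs:
#             db['c7n:parent-id'] = server['id']            # annotate_parent behaviour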
@sources.register('describe-child-azure')
class ChildDescribeSource(DescribeSource):
resource_query_factory = ChildResourceQuery
class TypeMeta(type):
def __repr__(cls):
return "<Type info service:%s client: %s>" % (
cls.service,
cls.client)
class TypeInfo(metaclass=TypeMeta):
doc_groups = None
"""api client construction information"""
service = ''
client = ''
# Default id field, resources should override if different (used for meta filters, report etc)
id = 'id'
resource = constants.RESOURCE_ACTIVE_DIRECTORY
@classmethod
def extra_args(cls, resource_manager):
return {}
class ChildTypeInfo(TypeInfo, metaclass=TypeMeta):
"""api client construction information for child resources"""
parent_manager_name = ''
annotate_parent = True
raise_on_exception = True
parent_key = 'c7n:parent-id'
@classmethod
def extra_args(cls, parent_resource):
return {}
class QueryMeta(type):
"""metaclass to have consistent action/filter registry for new resources."""
def __new__(cls, name, parents, attrs):
if 'filter_registry' not in attrs:
attrs['filter_registry'] = FilterRegistry(
'%s.filters' % name.lower())
if 'action_registry' not in attrs:
attrs['action_registry'] = ActionRegistry(
'%s.actions' % name.lower())
return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
class QueryResourceManager(ResourceManager, metaclass=QueryMeta):
class resource_type(TypeInfo):
pass
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
self._session = None
def augment(self, resources):
return resources
def get_permissions(self):
return ()
def get_source(self, source_type):
return sources.get(source_type)(self)
def get_session(self):
if self._session is None:
self._session = local_session(self.session_factory)
return self._session
def get_client(self, service=None):
if not service:
return self.get_session().client(
"%s.%s" % (self.resource_type.service, self.resource_type.client))
return self.get_session().client(service)
def get_cache_key(self, query):
return {'source_type': self.source_type,
'query': query,
'resource': str(self.__class__.__name__)}
@classmethod
def get_model(cls):
return ResourceQuery.resolve(cls.resource_type)
@property
def source_type(self):
return self.data.get('source', 'describe-azure')
def resources(self, query=None):
cache_key = self.get_cache_key(query)
resources = None
if self._cache.load():
resources = self._cache.get(cache_key)
if resources is not None:
self.log.debug("Using cached %s: %d" % (
"%s.%s" % (self.__class__.__module__,
self.__class__.__name__),
len(resources)))
if resources is None:
resources = self.augment(self.source.get_resources(query))
self._cache.save(cache_key, resources)
resource_count = len(resources)
resources = self.filter_resources(resources)
# Check if we're out of a policies execution limits.
if self.data == self.ctx.policy.data:
self.check_resource_limit(len(resources), resource_count)
return resources
    def check_resource_limit(self, selection_count, population_count):
        """Check if policy's execution affects more resources than its limit.
"""
p = self.ctx.policy
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
return max_resource_limits.check_resource_limits()
def get_resources(self, resource_ids, **params):
resource_client = self.get_client()
m = self.resource_type
get_client, get_op, extra_args = m.get_spec
if extra_args:
params.update(extra_args)
op = getattr(getattr(resource_client, get_client), get_op)
data = [
op(rid, **params)
for rid in resource_ids
]
return [r.serialize(True) for r in data]
@staticmethod
def register_actions_and_filters(registry, resource_class):
resource_class.action_registry.register('notify', Notify)
if 'logic-app' not in resource_class.action_registry:
resource_class.action_registry.register('logic-app', LogicAppAction)
def validate(self):
self.source.validate()
class ChildResourceManager(QueryResourceManager, metaclass=QueryMeta):
child_source = 'describe-child-azure'
parent_manager = None
@property
def source_type(self):
source = self.data.get('source', self.child_source)
if source == 'describe':
source = self.child_source
return source
def get_parent_manager(self):
if not self.parent_manager:
self.parent_manager = self.get_resource_manager(self.resource_type.parent_manager_name)
return self.parent_manager
def get_session(self):
if self._session is None:
session = super(ChildResourceManager, self).get_session()
if self.resource_type.resource != constants.RESOURCE_ACTIVE_DIRECTORY:
session = session.get_session_for_resource(self.resource_type.resource)
self._session = session
return self._session
def enumerate_resources(self, parent_resource, type_info, **params):
client = self.get_client()
enum_op, list_op, extra_args = self.resource_type.enum_spec
# There are 2 types of extra_args:
# - static values stored in 'extra_args' dict (e.g. some type)
# - dynamic values are retrieved via 'extra_args' method (e.g. parent name)
if extra_args:
params.update({key: extra_args[key](parent_resource) for key in extra_args.keys()})
params.update(type_info.extra_args(parent_resource))
# Some resources might not have enum_op piece (non-arm resources)
if enum_op:
op = getattr(getattr(client, enum_op), list_op)
else:
op = getattr(client, list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
        raise TypeError("Enumerating resources resulted in a return "
                        "value which could not be iterated.")
@staticmethod
def register_child_specific(registry, resource_class):
if not issubclass(resource_class, ChildResourceManager):
return
# If Child Resource doesn't annotate parent, there is no way to filter based on
# parent properties.
if resource_class.resource_type.annotate_parent:
resource_class.filter_registry.register('parent', ParentFilter)
resources.subscribe(QueryResourceManager.register_actions_and_filters)
resources.subscribe(ChildResourceManager.register_child_specific)
|
py
|
1a5a14ded39e06694a0331df146f717194765acb
|
"""
Source classes
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/like2/sources.py,v 1.53 2018/01/27 15:37:17 burnett Exp $
"""
import os, copy
import numpy as np
from skymaps import SkyDir
from uw.like import Models
from . import response
# convenience adapters
def LogParabola(*pars):
model = Models.LogParabola(p=pars, free=[True,True,False,False])
return model
def PowerLaw(*pars):
model = Models.PowerLaw(p=pars)
return model
def ExpCutoff(*pars):
model = Models.ExpCutoff(p=pars, free=[True, True, False])
return model
def PLSuperExpCutoff(*pars):
model = Models.PLSuperExpCutoff(p=pars, free=[True,True,False,False])
return model
def Constant(*pars, **kw): return Models.Constant(p=pars, **kw)
def FBconstant(f,b, **kw): return Models.FrontBackConstant(f,b, **kw)
def PSR_default(): return Models.PLSuperExpCutoff(p=(1e-13, 1.25, 1500, 0.67), free=[True,True, True, False])
def ismodel(model):
""" check that model is an instance of Models.Model"""
return isinstance(model, Models.Model)
def set_default_bounds( model, force=False):
"""
Handy utility to set bounds for a model from like.Models
force=True to override previously set bounds.
"""
if not force and hasattr(model, 'bounds'):
# model has bounds. Were they set? check to see if all are None
notset = np.all(np.array([np.all(b ==[None,None]) for b in model.bounds]))
if not notset: return
bounds=[]
def to_internal(fun, values):
return [fun(value) if value is not None else None for value in values]
for pname, mp in zip(model.param_names, model.mappers):
plim = (None,None)
try:
plim = dict(
Index=(0.0, 3.5),
Norm=(10**-18, 10**-7),
Scale=(0.001, 4.0),
beta=(-0.1, 1.),
Cutoff=(100., 2e5),
)[pname.split('_')[0]]
except: pass
bounds.append( to_internal(mp.tointernal, plim) )
model.bounds = np.array(bounds) # convert to array so can mask with free
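# Worked example of the lookup above: a parameter named "Index_1" matches the
# "Index" entry, so its external limits (0.0, 3.5) are pushed through the
# parameter's tointernal mapper and stored in model.bounds; a parameter with no
# matching entry keeps (None, None).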
class Source(object):
""" base class for all pointlike/like2 sources
Subclasses are:
PointSource
ExtendedSource
GlobalSource
    All instances have the following properties:
        * model, a Models.Model object
        * skydir : [skymaps.Skydir | None]
    Subclasses must implement a function response(band), which, given an EnergyBand parameter,
    returns a Response object appropriate for the source. This provides the angular dependence
    of the response specific to the band energy and event type.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
self.changed = False # flag for bandlike
assert self.name is not None, 'bad source name'
self.name = str(self.name) # force to be a string
if self.skydir is None:
# global source: keep original model
self.free = np.array(self.model.free).copy() if self.model is not None else None # save copy of initial free array to restore
return
elif hasattr(self.skydir, '__iter__'): #allow a tuple of (ra,dec)
self.skydir = SkyDir(*self.skydir)
if 'model' not in kwargs or self.model is None:
self.model=LogParabola(1e-14, 2.2, 0, 1e3)
self.model.free[2:]=False
elif type(self.model)==str:
try:
t =eval(self.model)
except Exception, exp:
print 'Failed to evaluate model expression, %s: %s' %(self.model, exp)
raise
self.model=t
if self.model.name=='PowerLaw':
# convert from PowerLaw to LogParabola
par,sig = self.model.statistical()
free = self.model.free[:]
self.model = LogParabola(*(list(par)+[0, self.model.e0]))
self.model.free[:2] = free
self.model.free[2:] = False
elif self.model.name=='ExpCutoff':
try:
print 'converting %s to PLSuperExpCutoff' %self.name
self.model = self.model.create_super_cutoff()
except FloatingPointError:
print 'Failed'
elif self.model.name=='PowerLawFlux':
f, gamma = self.model.get_all_parameters() #10**self.model.p
emin = self.model.emin
try:
self.model=LogParabola(f*(gamma-1)/emin, gamma, 0, emin)
except Exception, msg:
print 'Failed to create LogParabola for source %s, pars= %s'% (self.name, (f,gamma,emin))
raise
self.model.free[2:]=False
elif self.model.name=='LogParabola':
#what was this for?
#if hasattr(self, 'free') and len(self.free)>3: self.free[3]=False
if sum(self.model.free)==4:
# do not allow all parameters to be free: freeze E_break if so
self.model.free[-1]=False
elif sum(self.model.free)==2 and not self.model.free[1]:
# undo freezing
                print 'Unfreezing E_break for source %s' % self.name
self.model.free[-1]=True
if self.model.name not in ['LogParabola','PLSuperExpCutoff','ExpCutoff', 'Constant']:
raise Exception('model %s not supported' % self.model.name)
#self.free = self.model.free.copy()
if not hasattr(self.model, 'npar'):
raise Exception('model %s for source %s was not converted to new format'\
% (self.model.name, self.name))
# finally, add bounds to the models object, ignoring similar capability in Models.
set_default_bounds( self.model )
def get_spectral_model(self):
return self.model
def set_spectral_model(self, newmodel):
t =self.model
self.model = newmodel
return t
spectral_model = property(get_spectral_model, set_spectral_model)
def freeze(self, parname, value=None):
self.model.freeze(parname)
if value is not None: self.model.setp(parname, value)
self.changed=True
#assert sum(self.model.free)>0, 'cannot freeze all parameters this way'
def thaw(self, parname):
self.model.freeze(parname, freeze=False)
self.changed = True
def __str__(self):
return '\tname : %s\n\tskydir: %s\n\tmodel : %s\n\t\t%s' %\
(self.name, self.skydir, self.model.name, self.model.__str__(indent='\t\t'))
def __repr__(self):
return '%s.%s: \n%s' % (self.__module__,self.__class__.__name__ , self.__str__())
@property
def isextended(self):
return hasattr(self, 'dmodel') and not self.isglobal
@property
def isglobal(self):
return self.skydir is None
class PointSource(Source):
def __init__(self, **kwargs):
kwargs.update(spatial_model=None) # allow test for extent (no extent!)
super(PointSource, self).__init__(**kwargs)
def near(self, otherdir, distance=10):
return self.skydir.difference(otherdir) < np.radians(distance)
def copy(self):
""" return a new PointSource object, with a copy of the model, others"""
ret = PointSource(**self.__dict__)
ret.model = self.model.copy()
return ret
def response(self, band, roi=None, **kwargs):
return response.PointResponse(self, band, roi, **kwargs)
class ExtendedSource(Source):
#def __str__(self):
# return self.name + ' '+ self.model.name \
# + (' (free)' if np.any(self.model.free) else ' (fixed)')
def __str__(self):
return '\tname : %s\n\tskydir: %s\n\tSpatial : %s\n\tmodel : %s\n\t\t%s' %\
(self.name, self.skydir, self.dmodel.name, self.model.name, self.model.__str__(indent='\t\t'))
def near(self, otherdir, distance=10):
return self.skydir.difference(otherdir) < np.radians(distance)
    def copy(self):
        """ return a new ExtendedSource object, with a copy of the model object"""
ret = ExtendedSource(**self.__dict__)
ret.model = self.model.copy()
if ret.model.name=='LogParabola':
ret.model.free[-1]=False # make sure Ebreak is frozen
return ret
    def response(self, band, roi=None, **kwargs):
        """ return a Response object, which, given a band, can create a convolved image
and calculate expected counts
"""
return response.ExtendedResponse(self, band, roi, **kwargs)
class GlobalSource(Source):
def __init__(self, **kwargs):
super(GlobalSource, self).__init__(**kwargs)
self.dmodel= kwargs.get('dmodel', None)
assert self.skydir is None # used as a flag
# Special option "free_diffuse" from config['input_model'] to free spectral model for diffuse sources
free = kwargs.pop('free', None) # expect a list of names
if free is not None and self.name in free:
self.model.free[0]=True
            if self.model.name=='PowerLaw': self.model.free[1]=True
#print '{}, free={}'.format(self, free)
def copy(self):
""" return a new GlobalSource object, with a copy of the model, others"""
ret = GlobalSource(**self.__dict__)
ret.model = self.model.copy()
return ret
def response(self, band, roi=None, **kwargs):
""" return a Response class for the band"""
assert self.dmodel, 'Need DiffuseBase object to determine response'
try:
resp_class = dict(
Isotropic =response.IsotropicResponse,
MapCube =response.DiffuseResponse,
CachedMapCube=response.CachedDiffuseResponse,
Healpix =response.DiffuseResponse,
HealpixCube = response.DiffuseResponse,
FitsMapCube = response.DiffuseResponse,
FitsMapCubeList = response.DiffuseResponse,
IsotropicSpectralFunction = response.IsotropicResponse,
AziLimb = response.IsotropicResponse,
GulliLimb = response.IsotropicResponse,
)[self.dmodel.type]
except Exception, msg:
raise Exception('Could not find a response class for source %s:"%s"' %(self,msg))
try:
return resp_class(self,band,roi, **kwargs)
except Exception: # assume no overlap
raise
return response.NoResponse(self, band, roi, **kwargs)
|
py
|
1a5a16bacd313485fc4563ef57378e7c032f104b
|
model = dict(
type='PAN',
backbone=dict(
type='resnet18',
pretrained=True
),
neck=dict(
type='FPEM_v1',
in_channels=(64, 128, 256, 512),
out_channels=128
),
detection_head=dict(
type='PA_Head',
in_channels=512,
hidden_dim=128,
num_classes=6,
loss_text=dict(
type='DiceLoss',
loss_weight=1.0
),
loss_kernel=dict(
type='DiceLoss',
loss_weight=0.5
),
loss_emb=dict(
type='EmbLoss_v1',
feature_dim=4,
loss_weight=0.25
)
)
)
data = dict(
batch_size=8,
train=dict(
type='PAN_IC15',
split='train',
is_transform=True,
img_size=736,
short_size=736,
kernel_scale=0.5,
read_type='cv2'
),
test=dict(
type='PAN_IC15',
split='test',
short_size=736,
read_type='cv2'
)
)
train_cfg = dict(
lr=1e-3,
schedule='polylr',
epoch=600,
optimizer='Adam'
)
test_cfg = dict(
min_score=0.7,
min_area=10,
bbox_type='rect',
result_path='outputs/submit_ic15.zip'
)
|
py
|
1a5a1706622118674c5f5c4f90c02263db93328c
|
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity, EntityProperty, EdmType
from database.models.Datum import Datum
from string import Template
import uuid
def generateRowKey():
return str(uuid.uuid4())
class DatumRepository:
def __init__(self):
self.tableService = TableService(connection_string='DefaultEndpointsProtocol=https;AccountName=styles-db;AccountKey=GKnYYUiWGAPVQuu7qjqPDUrfESoMQLrQ2YZmAahqW6WnSkwICAxd8yj3G2OlZMA27VPVmAECrcrBwq8bJfmjXg==;TableEndpoint=https://styles-db.table.cosmos.azure.com:443/;')
self.tableName = 'dataset'
self.PartitionKey = 'dataset'
# Returns the created Entity object
def create(self, datum):
entity = Entity()
entity.PartitionKey = self.PartitionKey
entity.RowKey = generateRowKey()
entity.blobName = EntityProperty(EdmType.STRING, datum.blobName)
entity.contrast = EntityProperty(EdmType.DOUBLE, datum.contrast)
entity.brightness = EntityProperty(EdmType.DOUBLE, datum.brightness)
entity.temperature = EntityProperty(EdmType.DOUBLE, datum.temperature)
entity.saturation = EntityProperty(EdmType.DOUBLE, datum.saturation)
return self.tableService.insert_entity(self.tableName, entity)
# Returns either an Entity or a list of Entity objects
def read(self, RowKey = None):
if RowKey is None:
# Get all
queryTemplate = Template("PartitionKey eq '$PartitionKey'")
result = self.tableService.query_entities(self.tableName, filter=queryTemplate.substitute(PartitionKey=self.PartitionKey))
result = [Datum(item) for item in result]
return result
# Get by id
result = self.tableService.get_entity(self.tableName, self.PartitionKey, RowKey)
result = Datum(result)
return result
    # Updates the entity in the table (returns nothing)
def update(self, entity):
self.tableService.update_entity(self.tableName, entity)
    # Deletes the entity identified by RowKey (returns nothing)
def delete(self, RowKey):
self.tableService.delete_entity(self.tableName, self.PartitionKey, RowKey)
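# Usage sketch (illustrative only; requires a reachable Azure Table endpoint and
# a Datum model exposing blobName/contrast/brightness/temperature/saturation):
#
#     repo = DatumRepository()
#     repo.create(datum)          # inserts a row under a freshly generated RowKey
#     rows = repo.read()          # every Datum in the 'dataset' partition
#     one = repo.read(row_key)    # a single Datum, if its RowKey is known
#     repo.delete(row_key)
#
# Note: create() returns whatever insert_entity returns (typically an etag), so
# RowKeys have to be recovered by reading the partition back.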
|
py
|
1a5a17f3fa8df495d704265a1e4c8af70be044e1
|
"""Implementations for torch.nn.functional equivalent for MPC."""
# stdlib
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
import torch
from sympc.session import get_session
from sympc.tensor import MPCTensor
from sympc.tensor import ShareTensor
from sympc.utils import parallel_execution
def relu(x: MPCTensor) -> MPCTensor:
"""Rectified linear unit function.
Args:
x (MPCTensor): The tensor on which we apply the function
Returns:
An MPCTensor which represents the ReLu applied on the input tensor
"""
res = x * (x >= 0)
return res
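# Plaintext illustration of the identity used by relu() above. This is only a
# sketch on ordinary torch tensors, not part of the sympc API; MPCTensor applies
# the same mask trick on secret shares.
def _relu_identity_example() -> bool:
    """Check that x * (x >= 0) equals torch.relu(x) on a plaintext tensor."""
    x = torch.tensor([-2.0, -0.5, 0.0, 1.5, 3.0])
    masked = x * (x >= 0)  # comparison yields a 0/1 mask, multiplication zeroes negatives
    return bool(torch.equal(masked, torch.relu(x)))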
def mse_loss(pred: MPCTensor, target: MPCTensor, reduction: str = "mean") -> MPCTensor:
"""Mean Squared Error loss.
Args:
pred (MPCTensor): The predictions obtained
target (MPCTensor): The target values
reduction (str): the reduction method, default is `mean`
Returns:
The loss
Raises:
ValueError: If `reduction` not in supported methods
"""
if reduction == "mean":
result = (pred - target).pow(2).sum() / pred.shape[0]
elif reduction == "sum":
result = (pred - target).pow(2).sum()
else:
raise ValueError("do not support reduction method: %s" % reduction)
return result
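# Worked example for the reductions above (plaintext values, intuition only):
#   pred = [1, 2, 3], target = [1, 1, 1]  ->  squared errors [0, 1, 4]
#   reduction="sum"  -> 0 + 1 + 4 = 5
#   reduction="mean" -> 5 / pred.shape[0] = 5 / 3
# Note that "mean" divides by the batch size (pred.shape[0]) only, not by the
# total number of elements, which matters for multi-dimensional predictions.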
Kernel2D = Tuple[int, int]
Stride2D = Tuple[int, int]
Padding2D = Tuple[int, int]
Dilation2D = Tuple[int, int]
MaxPool2DArgs = Tuple[Kernel2D, Stride2D, Padding2D, Dilation2D]
def _sanity_check_max_pool2d(
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
) -> MaxPool2DArgs:
"""Sanity check the parameters required for max_pool2d (backward and forward pass).
Args:
kernel_size (Union[int, Tuple[int, int]]): the kernel size
in case it is passed as an integer then that specific value is used for height and width
stride (Union[int, Tuple[int, int]]): the stride size
in case it is passed as an integer then that specific value is used for height and width
padding (Union[int, Tuple[int, int]]): the padding size
in case it is passed as an integer then that specific value is used for height and width
dilation (Union[int, Tuple[int, int]]): the dilation size
in case it is passed as an integer then that specific value is used for height and width
Returns:
A 4 element type with types Tuple[int, int] representing the converted parameters.
Raises:
ValueError: if the parameters are not passing the sanity check
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
if len(kernel_size) != 2:
raise ValueError("Kernel_size should have only 2 dimensions")
if stride is None:
stride = kernel_size
if isinstance(stride, int):
stride = (stride, stride)
if len(stride) != 2:
raise ValueError("Stride should have only 2 dimensions")
if isinstance(padding, int):
padding = (padding, padding)
if padding[0] > kernel_size[0] or padding[1] > kernel_size[1]:
raise ValueError("Padding should be <= kernel_size / 2")
if len(padding) != 2:
raise ValueError("Padding should have only 2 dimensions")
if isinstance(dilation, int):
dilation = (dilation, dilation)
if len(dilation) != 2:
raise ValueError("Dilation should have only 2 dimensions")
if dilation[0] != 1 or dilation[1] != 1:
raise ValueError("Supported only dilation == 1")
return kernel_size, stride, padding, dilation
def _reshape_max_pool2d(
x: MPCTensor,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
dilation: Tuple[int, int],
) -> MPCTensor:
"""Prepare the share tensors by calling the reshape function in parallel at each party.
Args:
x (MPCTensor): the MPCTensor on which to apply the reshape operation
kernel_size (Tuple[int, int]): the kernel size
stride (Tuple[int, int]): the stride size
padding (Tuple[int, int]): the padding size
dilation (Tuple[int, int]): the dilation size
Returns:
The reshaped MPCTensor.
"""
session = x.session
args = [[share, kernel_size, stride, padding, dilation] for share in x.share_ptrs]
shares = parallel_execution(helper_max_pool2d_reshape, session.parties)(args)
res_shape = shares[0].shape.get()
res = MPCTensor(shares=shares, session=session, shape=res_shape)
return res
def helper_max_pool2d_reshape(
x: ShareTensor,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
dilation: Tuple[int, int],
) -> ShareTensor:
"""Function that runs at each party for preparing the share.
Reshape each share tensor to prepare it for calling 'argmax'.
The new share would have "each element" as the input on which we
will run the max_pool2d kernel.
Args:
x (ShareTensor): the ShareTensor on which to apply the reshaping
kernel_size (Tuple[int, int]): the kernel size
stride (Tuple[int, int]): the stride size
padding (Tuple[int, int]): the padding size
dilation (Tuple[int, int]): the dilation size
Returns:
The prepared share tensor (reshaped)
"""
session = get_session(x.session_uuid)
tensor = x.tensor.numpy()
padding = [(0, 0)] * len(tensor.shape[:-2]) + [
(padding[0], padding[0]),
(padding[1], padding[1]),
]
tensor_type = session.tensor_type
padding_value = 0
if session.rank == 0:
        # ATTENTION: -25 is the minimum (most negative) fill value that still works for max_pool2d
padding_value = -25
tensor = np.pad(tensor, padding, mode="constant", constant_values=padding_value)
output_shape = tensor.shape[:-2]
output_shape += (
(tensor.shape[-2] - kernel_size[0]) // stride[0] + 1,
(tensor.shape[-1] - kernel_size[1]) // stride[1] + 1,
)
output_shape += kernel_size
output_strides = tensor.strides[:-2]
output_strides += (stride[0] * tensor.strides[-2], stride[1] * tensor.strides[-1])
output_strides += tensor.strides[-2:]
window_view_share = torch.tensor(
np.lib.stride_tricks.as_strided(
tensor, shape=output_shape, strides=output_strides
),
dtype=tensor_type,
)
window_view_share = window_view_share.reshape(-1, *kernel_size)
res_share = ShareTensor(config=x.config)
res_share.tensor = window_view_share
return res_share
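# Standalone numpy sketch of the stride-trick windowing performed above, for a
# 4x4 input with kernel_size=(2, 2) and stride=(2, 2). It assumes nothing from
# sympc and is meant purely as an illustration of the reshape step.
def _window_view_example() -> np.ndarray:
    """Return a (4, 2, 2) array holding the four 2x2 pooling windows of a 4x4 input."""
    tensor = np.arange(16).reshape(4, 4)
    kernel, stride = (2, 2), (2, 2)
    out_shape = (
        (tensor.shape[-2] - kernel[0]) // stride[0] + 1,
        (tensor.shape[-1] - kernel[1]) // stride[1] + 1,
    ) + kernel
    out_strides = (
        stride[0] * tensor.strides[-2],
        stride[1] * tensor.strides[-1],
    ) + tensor.strides[-2:]
    windows = np.lib.stride_tricks.as_strided(
        tensor, shape=out_shape, strides=out_strides
    )
    return windows.reshape(-1, *kernel)  # one row per pooling window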
def max_pool2d(
x: MPCTensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
return_indices: bool = False,
) -> Union[MPCTensor, Tuple[MPCTensor, MPCTensor]]:
"""Compute the max pool for a tensor with 2 dimension.
Args:
x (MPCTensor): the MPCTensor on which to apply the operation
kernel_size (Union[int, Tuple[int, int]]): the kernel size
in case it is passed as an integer then that specific value is used for height and width
stride (Union[int, Tuple[int, int]]): the stride size
in case it is passed as an integer then that specific value is used for height and width
padding (Union[int, Tuple[int, int]]): the padding size
in case it is passed as an integer then that specific value is used for height and width
dilation (Union[int, Tuple[int, int]]): the dilation size
in case it is passed as an integer then that specific value is used for height and width
return_indices (bool): to return the indices of the max values
Returns:
        A tuple representing maximum values and the indices (as a one-hot encoding)
Raises:
ValueError: if the kernel size is bigger than the input
"""
kernel_size, stride, padding, dilation = _sanity_check_max_pool2d(
kernel_size, stride, padding, dilation
)
if (
x.shape[-2] + 2 * padding[0] < kernel_size[0]
or x.shape[-1] + 2 * padding[1] < kernel_size[1]
):
raise ValueError(
f"Kernel size ({kernel_size}) has more elements on an axis than "
f"input shape ({x.shape}) considering padding of {padding}"
)
x_reshaped = _reshape_max_pool2d(
x, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation
)
res_max_columns, columns = x_reshaped.max(dim=-1, one_hot=True)
res_max, rows = res_max_columns.max(dim=-1, one_hot=True)
output_shape = x.shape[:-2] + (
(x.shape[-2] - kernel_size[0] + 2 * padding[0]) // stride[0] + 1,
(x.shape[-1] - kernel_size[1] + 2 * padding[1]) // stride[1] + 1,
)
res = res_max.reshape(*output_shape)
if return_indices:
indices = columns * rows.unsqueeze(-1)
res = (res, indices.reshape(output_shape + kernel_size))
return res
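# Shape sanity check for max_pool2d above (a worked example, not executed):
# with x.shape = (1, 1, 4, 4), kernel_size = (2, 2), stride = (2, 2), padding = (0, 0):
#   H_out = (4 - 2 + 2 * 0) // 2 + 1 = 2
#   W_out = (4 - 2 + 2 * 0) // 2 + 1 = 2
# so res has shape (1, 1, 2, 2) and, with return_indices=True, the one-hot
# indices have shape (1, 1, 2, 2, 2, 2), where the trailing (2, 2) is kernel_size.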
def max_pool2d_backward_helper(
input_shape: Tuple[int],
grads_share: ShareTensor,
kernel_size: Tuple[int, int],
stride: Tuple[int, int],
padding: Tuple[int, int],
) -> ShareTensor:
"""Helper function to compute the gradient needed to be passed to the parent node.
Args:
input_shape (Tuple[int]): the size of the input tensor when running max_pool2d
grads_share (ShareTensor): the share for the output gradient specific to this party
kernel_size (Tuple[int, int]): the kernel size
stride (Tuple[int, int]): the stride size
padding (Tuple[int, int]): the padding size
Returns:
A ShareTensor specific for the computed gradient
Raises:
        ValueError: if the input shape (taking the padding into account) is smaller than the
kernel shape
"""
session = get_session(str(grads_share.session_uuid))
res_shape = input_shape[:-2]
res_shape += (input_shape[-2] + 2 * padding[0], input_shape[-1] + 2 * padding[1])
if res_shape[-2] < kernel_size[0] or res_shape[-1] < kernel_size[1]:
raise ValueError(
f"Kernel size ({kernel_size}) has more elements on an axis than "
f"input shape ({res_shape}) considering padding of {padding}"
)
tensor_type = session.tensor_type
tensor = torch.zeros(res_shape, dtype=tensor_type)
for i in range((res_shape[-2] - kernel_size[0]) // stride[0] + 1):
row_idx = i * stride[0]
for j in range((res_shape[-1] - kernel_size[1]) // stride[1] + 1):
col_idx = j * stride[1]
if len(res_shape) == 4:
tensor[
:,
:,
row_idx : row_idx + kernel_size[0],
col_idx : col_idx + kernel_size[1],
] += grads_share.tensor[:, :, i, j]
else:
tensor[
:,
row_idx : row_idx + kernel_size[0],
col_idx : col_idx + kernel_size[1],
] += grads_share.tensor[:, i, j]
if len(res_shape) == 4:
tensor = tensor[
:, :, padding[0] : input_shape[-2], padding[1] : input_shape[-1]
]
else:
tensor = tensor[
:,
padding[0] : res_shape[-2] - padding[0],
padding[1] : res_shape[-1] - padding[1],
]
res = ShareTensor(config=grads_share.config)
res.tensor = tensor
return res
def max_pool2d_backward(
grad: MPCTensor,
input_shape: Tuple[int],
indices: MPCTensor,
kernel_size: Union[int, Tuple[int, int]],
stride: Optional[Union[int, Tuple[int, int]]] = None,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
) -> MPCTensor:
"""Helper function for the backwards step for max_pool2d.
    Credit goes to the CrypTen team.
Args:
grad (MPCTensor): gradient that comes from the child node
input_shape (Tuple[int]): the shape of the input when the max_pool2d was run
indices (MPCTensor): the indices where the maximum value was found in the input
kernel_size (Union[int, Tuple[int, int]]): the kernel size
in case it is passed as an integer then that specific value is used for height and width
stride (Union[int, Tuple[int, int]]): the stride size
in case it is passed as an integer then that specific value is used for height and width
padding (Union[int, Tuple[int, int]]): the padding size
in case it is passed as an integer then that specific value is used for height and width
dilation (Union[int, Tuple[int, int]]): the dilation size
in case it is passed as an integer then that specific value is used for height and width
Returns:
The gradient that should be backpropagated (MPCTensor)
Raises:
ValueError: In case some of the values for the parameters are not supported
"""
kernel_size, stride, padding, dilation = _sanity_check_max_pool2d(
kernel_size, stride, padding, dilation
)
if len(grad.shape) != 4 and len(grad.shape) != 3:
raise ValueError(
f"Expected gradient to have 3/4 dimensions (4 with batch). Found {len(grad.shape)}"
)
if len(indices.shape) != len(grad.shape) + 2:
raise ValueError(
"Expected indices shape to have 2 extra dimensions because of "
f"(kernel_size, kernel_size), but has {len(indices.shape)}"
)
session = grad.session
mappings = grad.view(grad.shape + (1, 1)) * indices
args = [
[tuple(input_shape), grads_share, kernel_size, stride, padding]
for grads_share in mappings.share_ptrs
]
shares = parallel_execution(max_pool2d_backward_helper, session.parties)(args)
res = MPCTensor(shares=shares, shape=input_shape, session=session)
return res
|
py
|
1a5a18b41de570bde4d8409713fef1adbfc821c6
|
# Checks for an absolute error
# with an error of at most 1e-7
# Don't edit this file. Edit real_abs_rel_template.py instead, and then run _real_check_gen.py
from itertools import zip_longest
from decimal import Decimal, InvalidOperation
from kg.checkers import * ### @import
EPS = Decimal('1e-7')
EPS *= 1+Decimal('1e-5') # add some leniency
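# Worked example of the tolerance: a judge value of 0.1234567 against a
# contestant value of 0.12345675 differs by 5e-8 <= EPS, so it is accepted; a
# contestant value of 0.1234569 is off by 2e-7 > EPS and is rejected with
# "Bad precision."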
@set_checker()
@default_score
def checker(input_file, output_file, judge_file, **kwargs):
worst = 0
for line1, line2 in zip_longest(output_file, judge_file):
if (line1 is None) != (line2 is None): raise WA("Unequal number of lines")
p1 = line1.rstrip().split(" ")
p2 = line2.rstrip().split(" ")
if len(p1) != len(p2): raise WA("Incorrect number of values in line")
for v1, v2 in zip(p1, p2):
if v1 != v2: # they're different as tokens. try considering them as numbers
try:
err = abs_error(Decimal(v1), Decimal(v2))
except InvalidOperation:
raise WA(f"Unequal tokens that are not numbers: {v1!r} != {v2!r}")
worst = max(worst, err)
if err > EPS:
print('Found an error of', worst) ### @if format not in ('hr', 'cms')
raise WA("Bad precision.")
print('Worst error:', worst) ### @if format not in ('pg', 'hr', 'cms')
help_ = ('Compare if two sequences of real numbers are "close enough" (by 1e-7). '
"Uses absolute error.")
if __name__ == '__main__': chk(help=help_)
|
py
|
1a5a1b4d8afcb823bfa9dce34665d269225426d0
|
# Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
from enum import Enum, auto
from math import gcd
import copy
import logging as log
import numpy as np
from datumaro.components.cli_plugin import CliPlugin
from datumaro.components.extractor import (
DEFAULT_SUBSET_NAME, AnnotationType, Transform,
)
from datumaro.util import cast
NEAR_ZERO = 1e-7
class SplitTask(Enum):
classification = auto()
detection = auto()
segmentation = auto()
reid = auto()
class Split(Transform, CliPlugin):
"""
- classification split |n
Splits dataset into subsets(train/val/test) in class-wise manner. |n
Splits dataset images in the specified ratio, keeping the initial class
distribution.|n
|n
- detection & segmentation split |n
Each image can have multiple object annotations -
(bbox, mask, polygon). Since an image shouldn't be included
in multiple subsets at the same time, and image annotations
shouldn't be split, in general, dataset annotations are unlikely
to be split exactly in the specified ratio. |n
This split tries to split dataset images as close as possible
to the specified ratio, keeping the initial class distribution.|n
|n
- reidentification split |n
In this task, the test set should consist of images of unseen
people or objects during the training phase. |n
This function splits a dataset in the following way:|n
1. Splits the dataset into 'train + val' and 'test' sets|n
|s|sbased on person or object ID.|n
2. Splits 'test' set into 'test-gallery' and 'test-query' sets|n
|s|sin class-wise manner.|n
3. Splits the 'train + val' set into 'train' and 'val' sets|n
|s|sin the same way.|n
The final subsets would be
'train', 'val', 'test-gallery' and 'test-query'. |n
|n
Notes:|n
- Each image is expected to have only one Annotation. Unlabeled or
multi-labeled images will be split into subsets randomly. |n
- If Labels also have attributes, also splits by attribute values.|n
    - If there are not enough images in some class or attributes group,
the split ratio can't be guaranteed.|n
In reidentification task, |n
- Object ID can be described by Label, or by attribute (--attr parameter)|n
- The splits of the test set are controlled by '--query' parameter |n
|s|sGallery ratio would be 1.0 - query.|n
|n
Example:|n
|s|s%(prog)s -t classification --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t detection --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t segmentation --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t reid --subset train:.5 --subset val:.2 --subset test:.3 --query .5 |n
Example: use 'person_id' attribute for splitting|n
|s|s%(prog)s --attr person_id
"""
_default_split = [("train", 0.5), ("val", 0.2), ("test", 0.3)]
_default_query_ratio = 0.5
@classmethod
def build_cmdline_parser(cls, **kwargs):
parser = super().build_cmdline_parser(**kwargs)
parser.add_argument(
"-t",
"--task",
default=SplitTask.classification.name,
choices=[t.name for t in SplitTask],
help="(one of {}; default: %(default)s)".format(
", ".join(t.name for t in SplitTask)
),
)
parser.add_argument(
"-s",
"--subset",
action="append",
type=cls._split_arg,
dest="splits",
help="Subsets in the form: '<subset>:<ratio>' "
"(repeatable, default: %s)" % dict(cls._default_split),
)
parser.add_argument(
"--query",
type=float,
default=None,
help="Query ratio in the test set (default: %.3f)"
% cls._default_query_ratio,
)
parser.add_argument(
"--attr",
type=str,
dest="attr_for_id",
default=None,
help="Attribute name representing the ID (default: use label)",
)
parser.add_argument("--seed", type=int, help="Random seed")
return parser
@staticmethod
def _split_arg(s):
parts = s.split(":")
if len(parts) != 2:
import argparse
raise argparse.ArgumentTypeError()
return (parts[0], float(parts[1]))
def __init__(self, dataset, task, splits, query=None, attr_for_id=None, seed=None):
super().__init__(dataset)
if splits is None:
splits = self._default_split
self.task = task
self.splitter = self._get_splitter(
task, dataset, splits, seed, query, attr_for_id
)
self._initialized = False
self._subsets = self.splitter._subsets
@staticmethod
def _get_splitter(task, dataset, splits, seed, query, attr_for_id):
if task == SplitTask.classification.name:
splitter = _ClassificationSplit(dataset=dataset, splits=splits, seed=seed)
elif task in {SplitTask.detection.name, SplitTask.segmentation.name}:
splitter = _InstanceSpecificSplit(
dataset=dataset, splits=splits, seed=seed, task=task
)
elif task == SplitTask.reid.name:
splitter = _ReidentificationSplit(
dataset=dataset,
splits=splits,
seed=seed,
query=query,
attr_for_id=attr_for_id,
)
else:
raise Exception(
f"Unknown task '{task}', available "
f"splitter format: {[a.name for a in SplitTask]}"
)
return splitter
def __iter__(self):
# lazy splitting
if self._initialized is False:
self.splitter._split_dataset()
self._initialized = True
for i, item in enumerate(self._extractor):
yield self.wrap_item(item, subset=self.splitter._find_split(i))
def get_subset(self, name):
# lazy splitting
if self._initialized is False:
self.splitter._split_dataset()
self._initialized = True
return super().get_subset(name)
def subsets(self):
# lazy splitting
if self._initialized is False:
self.splitter._split_dataset()
self._initialized = True
return super().subsets()
class _TaskSpecificSplit:
def __init__(self, dataset, splits, seed, restrict=False):
self._extractor = dataset
snames, sratio, subsets = self._validate_splits(splits, restrict)
self._snames = snames
self._sratio = sratio
self._seed = seed
# remove subset name restriction
# https://github.com/openvinotoolkit/datumaro/issues/194
self._subsets = subsets
self._parts = []
self._length = "parent"
self._initialized = False
def _set_parts(self, by_splits):
self._parts = []
for subset in self._subsets:
self._parts.append((set(by_splits[subset]), subset))
@staticmethod
def _get_uniq_annotations(dataset):
annotations = []
unlabeled_or_multi = []
for idx, item in enumerate(dataset):
labels = [a for a in item.annotations if a.type == AnnotationType.label]
if len(labels) == 1:
annotations.append(labels[0])
else:
unlabeled_or_multi.append(idx)
return annotations, unlabeled_or_multi
@staticmethod
def _validate_splits(splits, restrict=False):
snames = []
ratios = []
subsets = set()
valid = ["train", "val", "test"]
for subset, ratio in splits:
# remove subset name restriction
# https://github.com/openvinotoolkit/datumaro/issues/194
if restrict:
assert subset in valid, "Subset name must be one of %s, got %s" % (
valid,
subset,
)
assert (
0.0 <= ratio and ratio <= 1.0
), "Ratio is expected to be in the range " "[0, 1], but got %s for %s" % (
ratio,
subset,
)
# ignore near_zero ratio because it may produce partition error.
if ratio > NEAR_ZERO:
# handling duplication
if subset in snames:
raise Exception("Subset (%s) is duplicated" % subset)
snames.append(subset)
ratios.append(float(ratio))
subsets.add(subset)
ratios = np.array(ratios)
total_ratio = np.sum(ratios)
if not abs(total_ratio - 1.0) <= NEAR_ZERO:
raise Exception(
"Sum of ratios is expected to be 1, got %s, which is %s"
% (splits, total_ratio)
)
return snames, ratios, subsets
@staticmethod
def _get_required(ratio):
if len(ratio) < 2:
return 1
for scale in [10, 100]:
farray = np.array(ratio) * scale
iarray = farray.astype(int)
if np.array_equal(iarray, farray):
break
# find gcd
common_divisor = iarray[0]
for val in iarray[1:]:
common_divisor = gcd(common_divisor, val)
required = np.sum(np.array(iarray / common_divisor).astype(int))
return required
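    # Worked example: ratio (0.5, 0.2, 0.3) scaled by 10 gives (5, 2, 3); their
    # gcd is 1, so required = 5 + 2 + 3 = 10, i.e. a group needs at least 10
    # samples for the 5:2:3 split to be honoured exactly.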
@staticmethod
def _get_sections(dataset_size, ratio):
n_splits = [int(np.around(dataset_size * r)) for r in ratio[:-1]]
n_splits.append(dataset_size - np.sum(n_splits))
# if there are splits with zero samples even if ratio is not 0,
# borrow one from the split who has one or more.
for ii, num_split in enumerate(n_splits):
if num_split == 0 and NEAR_ZERO < ratio[ii]:
midx = np.argmax(n_splits)
if n_splits[midx] > 0:
n_splits[ii] += 1
n_splits[midx] -= 1
sections = np.add.accumulate(n_splits[:-1])
return sections, n_splits
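    # Worked example: dataset_size=7 with ratio (0.5, 0.2, 0.3) yields raw counts
    # [4, 1, 2] (np.around(3.5) -> 4, np.around(1.4) -> 1, remainder to the last
    # split); if any split rounds to 0 while its ratio is positive, one sample is
    # borrowed from the largest split so every requested subset stays non-empty.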
@staticmethod
def _group_by_attr(items):
"""
Args:
items: list of (idx_img, ann). ann is the annotation from Label object.
Returns:
by_attributes: dict of { combination-of-attrs : list of index }
"""
# float--> numerical, others(int, string, bool) --> categorical
def _is_float(value):
if isinstance(value, str):
casted = cast(value, float)
if casted is not None:
if cast(casted, str) == value:
return True
return False
elif isinstance(value, float):
cast(value, float)
return True
return False
# group by attributes
by_attributes = dict()
for idx_img, ann in items:
# ignore numeric attributes
filtered = {}
for attr, value in ann.attributes.items():
if _is_float(value):
continue
filtered[attr] = value
attributes = tuple(sorted(filtered.items()))
if attributes not in by_attributes:
by_attributes[attributes] = []
by_attributes[attributes].append(idx_img)
return by_attributes
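    # Example: an annotation with attributes {"occluded": False, "score": "0.9"}
    # drops "score" (it parses as a float, hence numerical) and is grouped under
    # the key (("occluded", False),) together with all other non-occluded items.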
def _split_by_attr(
self, datasets, snames, ratio, out_splits, merge_small_classes=True
):
def _split_indice(indice):
sections, _ = self._get_sections(len(indice), ratio)
splits = np.array_split(indice, sections)
for subset, split in zip(snames, splits):
if 0 < len(split):
out_splits[subset].extend(split)
required = self._get_required(ratio)
rest = []
for _, items in datasets.items():
np.random.shuffle(items)
by_attributes = self._group_by_attr(items)
attr_combinations = list(by_attributes.keys())
np.random.shuffle(attr_combinations) # add randomness
for attr in attr_combinations:
indice = by_attributes[attr]
quo = len(indice) // required
if quo > 0:
filtered_size = quo * required
_split_indice(indice[:filtered_size])
rest.extend(indice[filtered_size:])
else:
rest.extend(indice)
quo = len(rest) // required
if quo > 0:
filtered_size = quo * required
_split_indice(rest[:filtered_size])
rest = rest[filtered_size:]
if not merge_small_classes and len(rest) > 0:
_split_indice(rest)
rest = []
if len(rest) > 0:
_split_indice(rest)
def _split_unlabeled(self, unlabeled, by_splits):
"""
split unlabeled data into subsets (detection, classification)
Args:
unlabeled: list of index of unlabeled or multi-labeled data
by_splits: splits up to now
Returns:
by_splits: final splits
"""
dataset_size = len(self._extractor)
_, n_splits = list(self._get_sections(dataset_size, self._sratio))
counts = [len(by_splits[sname]) for sname in self._snames]
expected = [max(0, v) for v in np.subtract(n_splits, counts)]
sections = np.add.accumulate(expected[:-1])
np.random.shuffle(unlabeled)
splits = np.array_split(unlabeled, sections)
for subset, split in zip(self._snames, splits):
if 0 < len(split):
by_splits[subset].extend(split)
def _find_split(self, index):
for subset_indices, subset in self._parts:
if index in subset_indices:
return subset
return DEFAULT_SUBSET_NAME # all the possible remainder --> default
def _split_dataset(self):
raise NotImplementedError()
class _ClassificationSplit(_TaskSpecificSplit):
"""
Splits dataset into subsets(train/val/test) in class-wise manner. |n
Splits dataset images in the specified ratio, keeping the initial class
distribution.|n
|n
Notes:|n
- Each image is expected to have only one Label. Unlabeled or
multi-labeled images will be split into subsets randomly. |n
- If Labels also have attributes, also splits by attribute values.|n
    - If there are not enough images in some class or attributes group,
the split ratio can't be guaranteed.|n
|n
Example:|n
|s|s%(prog)s -t classification --subset train:.5 --subset val:.2 --subset test:.3
"""
def __init__(self, dataset, splits, seed=None):
"""
Parameters
----------
dataset : Dataset
splits : list
A list of (subset(str), ratio(float))
The sum of ratios is expected to be 1.
seed : int, optional
"""
super().__init__(dataset, splits, seed)
def _split_dataset(self):
np.random.seed(self._seed)
# support only single label for a DatasetItem
# 1. group by label
by_labels = dict()
annotations, unlabeled = self._get_uniq_annotations(self._extractor)
for idx, ann in enumerate(annotations):
label = getattr(ann, "label", None)
if label not in by_labels:
by_labels[label] = []
by_labels[label].append((idx, ann))
by_splits = dict()
for subset in self._subsets:
by_splits[subset] = []
# 2. group by attributes
self._split_by_attr(by_labels, self._snames, self._sratio, by_splits)
# 3. split unlabeled data
if len(unlabeled) > 0:
self._split_unlabeled(unlabeled, by_splits)
# 4. set parts
self._set_parts(by_splits)
class _ReidentificationSplit(_TaskSpecificSplit):
"""
Splits a dataset for re-identification task.|n
Produces a split with a specified ratio of images, avoiding having same
labels in different subsets.|n
|n
In this task, the test set should consist of images of unseen
people or objects during the training phase. |n
This function splits a dataset in the following way:|n
1. Splits the dataset into 'train + val' and 'test' sets|n
|s|sbased on person or object ID.|n
2. Splits 'test' set into 'test-gallery' and 'test-query' sets|n
|s|sin class-wise manner.|n
3. Splits the 'train + val' set into 'train' and 'val' sets|n
|s|sin the same way.|n
The final subsets would be
'train', 'val', 'test-gallery' and 'test-query'. |n
|n
Notes:|n
- Each image is expected to have a single Label. Unlabeled or multi-labeled
images will be split into 'not-supported'.|n
- Object ID can be described by Label, or by attribute (--attr parameter)|n
- The splits of the test set are controlled by '--query' parameter. |n
|s|sGallery ratio would be 1.0 - query.|n
|n
Example: split a dataset in the specified ratio, split the test set|n
|s|s|s|sinto gallery and query in 1:1 ratio|n
|s|s%(prog)s -t reidentification --subset train:.5 --subset val:.2 --subset test:.3 --query .5|n
Example: use 'person_id' attribute for splitting|n
|s|s%(prog)s --attr person_id
"""
_default_query_ratio = 0.5
def __init__(self, dataset, splits, query=None, attr_for_id=None, seed=None):
"""
Parameters
----------
dataset : Dataset
splits : list
A list of (subset(str), ratio(float))
Subset is expected to be one of ["train", "val", "test"].
The sum of ratios is expected to be 1.
query : float
The ratio of 'test-query' set.
The ratio of 'test-gallery' set would be 1.0 - query.
attr_for_id: str
attribute name representing the person/object id.
if this is not specified, label would be used.
seed : int, optional
"""
super().__init__(dataset, splits, seed, restrict=True)
if query is None:
query = self._default_query_ratio
assert 0.0 <= query and query <= 1.0, (
"Query ratio is expected to be in the range " "[0, 1], but got %f" % query
)
test_splits = [("test-query", query), ("test-gallery", 1.0 - query)]
# remove subset name restriction
self._subsets = {"train", "val", "test-gallery", "test-query"}
self._test_splits = test_splits
self._attr_for_id = attr_for_id
def _split_dataset(self):
np.random.seed(self._seed)
id_snames, id_ratio = self._snames, self._sratio
attr_for_id = self._attr_for_id
dataset = self._extractor
# group by ID(attr_for_id)
by_id = dict()
annotations, unlabeled = self._get_uniq_annotations(dataset)
if attr_for_id is None: # use label
for idx, ann in enumerate(annotations):
ID = getattr(ann, "label", None)
if ID not in by_id:
by_id[ID] = []
by_id[ID].append((idx, ann))
else: # use attr_for_id
for idx, ann in enumerate(annotations):
attributes = dict(ann.attributes.items())
assert attr_for_id in attributes, (
"'%s' is expected as an attribute name" % attr_for_id
)
ID = attributes[attr_for_id]
if ID not in by_id:
by_id[ID] = []
by_id[ID].append((idx, ann))
required = self._get_required(id_ratio)
if len(by_id) < required:
log.warning(
"There's not enough IDs, which is %s, "
"so train/val/test ratio can't be guaranteed." % len(by_id)
)
# 1. split dataset into trval and test
# IDs in test set should not exist in train/val set.
test = id_ratio[id_snames.index("test")] if "test" in id_snames else 0
if NEAR_ZERO < test: # has testset
split_ratio = np.array([test, 1.0 - test])
IDs = list(by_id.keys())
np.random.shuffle(IDs)
sections, _ = self._get_sections(len(IDs), split_ratio)
splits = np.array_split(IDs, sections)
testset = {pid: by_id[pid] for pid in splits[0]}
trval = {pid: by_id[pid] for pid in splits[1]}
            # follow the ratio of dataset items as closely as possible.
# naive heuristic: exchange the best item one by one.
expected_count = int(
(len(self._extractor) - len(unlabeled)) * split_ratio[0]
)
testset_total = int(np.sum([len(v) for v in testset.values()]))
self._rebalancing(testset, trval, expected_count, testset_total)
else:
testset = dict()
trval = by_id
by_splits = dict()
for subset in self._subsets:
by_splits[subset] = []
# 2. split 'test' into 'test-gallery' and 'test-query'
if 0 < len(testset):
test_snames = []
test_ratio = []
for sname, ratio in self._test_splits:
test_snames.append(sname)
test_ratio.append(float(ratio))
self._split_by_attr(
testset, test_snames, test_ratio, by_splits, merge_small_classes=False
)
# 3. split 'trval' into 'train' and 'val'
trval_snames = ["train", "val"]
trval_ratio = []
for subset in trval_snames:
if subset in id_snames:
val = id_ratio[id_snames.index(subset)]
else:
val = 0.0
trval_ratio.append(val)
trval_ratio = np.array(trval_ratio)
total_ratio = np.sum(trval_ratio)
if total_ratio < NEAR_ZERO:
trval_splits = list(zip(["train", "val"], trval_ratio))
log.warning(
"Sum of ratios is expected to be positive, "
"but got %s, which sums to %s" % (trval_splits, total_ratio)
)
else:
trval_ratio /= total_ratio # normalize
self._split_by_attr(
trval, trval_snames, trval_ratio, by_splits, merge_small_classes=False
)
# split unlabeled data into 'not-supported'.
if len(unlabeled) > 0:
self._subsets.add("not-supported")
by_splits["not-supported"] = unlabeled
self._set_parts(by_splits)
@staticmethod
def _rebalancing(test, trval, expected_count, testset_total):
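# Greedy rebalancing of item counts between the ID-wise 'test' and 'trval' groups.
# 'diffs' maps the net change in test-set item count that swapping a
# (test ID, trval ID) pair would cause to the list of pairs producing that change.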
diffs = dict()
for id_test, items_test in test.items():
count_test = len(items_test)
for id_trval, items_trval in trval.items():
count_trval = len(items_trval)
diff = count_trval - count_test
if diff == 0:
continue # exchange has no effect
if diff not in diffs:
diffs[diff] = [(id_test, id_trval)]
else:
diffs[diff].append((id_test, id_trval))
if len(diffs) == 0: # nothing would be changed by exchange
return
exchanges = []
while True:
target_diff = expected_count - testset_total
# find nearest diff.
keys = np.array(list(diffs.keys()))
idx = (np.abs(keys - target_diff)).argmin()
nearest = keys[idx]
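# stop once even the nearest available swap would not bring the
# test-set item count closer to the expected size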
if abs(target_diff) <= abs(target_diff - nearest):
break
choice = np.random.choice(range(len(diffs[nearest])))
id_test, id_trval = diffs[nearest][choice]
testset_total += nearest
new_diffs = dict()
for diff, IDs in diffs.items():
new_list = []
for id1, id2 in IDs:
if id1 == id_test or id2 == id_trval:
continue
new_list.append((id1, id2))
if 0 < len(new_list):
new_diffs[diff] = new_list
diffs = new_diffs
exchanges.append((id_test, id_trval))
# exchange
for id_test, id_trval in exchanges:
test[id_trval] = trval.pop(id_trval)
trval[id_test] = test.pop(id_test)
class _InstanceSpecificSplit(_TaskSpecificSplit):
"""
Splits a dataset into subsets(train/val/test),
using object annotations as a basis for splitting.|n
Tries to produce an image split with the specified ratio, keeping the
initial distribution of class objects.|n
|n
Each image can have multiple object annotations
(instance bounding boxes, masks, polygons). Since an image shouldn't be included
in multiple subsets at the same time, and image annotations
shouldn't be split, dataset annotations are, in general, unlikely to be split
in exactly the specified ratio. |n
This split tries to split dataset images as close as possible
to the specified ratio, keeping the initial class distribution.|n
|n
Notes:|n
- Each image is expected to have one or more annotations.|n
- Only bbox annotations are considered in detection task.|n
- Mask or Polygon annotations are considered in segmentation task.|n
|n
Example: split a dataset so that the annotations of each object class are split|n
|s|s|s|sin the specified ratio between subsets|n
|s|s%(prog)s -t detection --subset train:.5 --subset val:.2 --subset test:.3 |n
|s|s%(prog)s -t segmentation --subset train:.5 --subset val:.2 --subset test:.3
"""
def __init__(self, dataset, splits, task, seed=None):
"""
Parameters
----------
dataset : Dataset
splits : list
A list of (subset(str), ratio(float))
The sum of ratios is expected to be 1.
seed : int, optional
"""
super().__init__(dataset, splits, seed)
if task == SplitTask.detection.name:
self.annotation_type = [AnnotationType.bbox]
elif task == SplitTask.segmentation.name:
self.annotation_type = [AnnotationType.mask, AnnotationType.polygon]
def _group_by_labels(self, dataset):
by_labels = dict()
unlabeled = []
for idx, item in enumerate(dataset):
instance_anns = [a for a in item.annotations if a.type in self.annotation_type]
if len(instance_anns) == 0:
unlabeled.append(idx)
continue
for instance_ann in instance_anns:
label = getattr(instance_ann, "label", None)
if label not in by_labels:
by_labels[label] = [(idx, instance_ann)]
else:
by_labels[label].append((idx, instance_ann))
return by_labels, unlabeled
def _split_dataset(self):
np.random.seed(self._seed)
subsets, sratio = self._snames, self._sratio
# 1. group by bbox label
by_labels, unlabeled = self._group_by_labels(self._extractor)
# 2. group by attributes
required = self._get_required(sratio)
by_combinations = list()
for _, items in by_labels.items():
by_attributes = self._group_by_attr(items)
# merge groups which have too small samples.
attr_combinations = list(by_attributes.keys())
np.random.shuffle(attr_combinations) # add randomness
cluster = []
min_cluster = max(required, len(items) * 0.01) # temp solution
for attr in attr_combinations:
indice = by_attributes[attr]
if len(indice) >= min_cluster:
by_combinations.append(indice)
else:
cluster.extend(indice)
if len(cluster) >= min_cluster:
by_combinations.append(cluster)
cluster = []
if len(cluster) > 0:
by_combinations.append(cluster)
cluster = []
total = len(self._extractor)
# total number of GT samples per label-attr combinations
n_combs = [len(v) for v in by_combinations]
# 3-1. initially count per-image GT samples
counts_all = {}
for idx_img in range(total):
if idx_img not in unlabeled:
counts_all[idx_img] = dict()
for idx_comb, indice in enumerate(by_combinations):
for idx_img in indice:
if idx_comb not in counts_all[idx_img]:
counts_all[idx_img][idx_comb] = 1
else:
counts_all[idx_img][idx_comb] += 1
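# counts_all[idx_img][idx_comb] == number of GT annotations on image idx_img
# that belong to label/attribute combination idx_comb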
by_splits = dict()
for sname in self._subsets:
by_splits[sname] = []
target_ins = [] # target instance numbers to be split
for sname, ratio in zip(subsets, sratio):
target_ins.append([sname, np.array(n_combs) * ratio])
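# each entry is [subset name, per-combination annotation counts the subset
# should ideally receive, i.e. the total counts scaled by the subset ratio]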
init_scores = {}
for idx_img, distributions in counts_all.items():
norm_sum = 0.0
for idx_comb, dis in distributions.items():
norm_sum += dis / n_combs[idx_comb]
init_scores[idx_img] = norm_sum
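# an image's score is the sum, over the combinations it contains, of
# (its annotation count / total annotation count of that combination);
# the assignment loop below visits higher-scoring images first, which are
# presumably the hardest to place without skewing the per-combination targets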
by_scores = dict()
for idx_img, score in init_scores.items():
if score not in by_scores:
by_scores[score] = [idx_img]
else:
by_scores[score].append(idx_img)
# helper functions to keep the number of annotations from exceeding the target_ins counts
def compute_penalty(counts, n_combs):
p = 0
for idx_comb, v in counts.items():
if n_combs[idx_comb] <= 0:
p += 1
else:
p += max(0, (v / n_combs[idx_comb]) - 1.0)
return p
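# e.g., if a split still needs 10 annotations of some combination and the image
# carries 12 of them, that combination contributes max(0, 12/10 - 1.0) = 0.2;
# combinations that are already exhausted (n_combs[idx_comb] <= 0) add a flat 1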
def update_nc(counts, n_combs):
for idx_comb, v in counts.items():
n_combs[idx_comb] = n_combs[idx_comb] - v
# 3-2. assign each DatasetItem to a split, one by one
actual_ins = copy.deepcopy(target_ins)
for score in sorted(by_scores.keys(), reverse=True):
indice = by_scores[score]
np.random.shuffle(indice) # add randomness for the same score
for idx in indice:
counts = counts_all[idx]
# shuffling split order to add randomness
# when two or more splits have the same penalty value
np.random.shuffle(actual_ins)
pp = []
for sname, nc in actual_ins:
if np.sum(nc) <= 0:
# the split has enough instances,
# stop adding more images to this split
pp.append(1e08)
else:
# compute penalty based on the number of GT samples
# added in the split
pp.append(compute_penalty(counts, nc))
# we push an image to a split with the minimum penalty
midx = np.argmin(pp)
sname, nc = actual_ins[midx]
by_splits[sname].append(idx)
update_nc(counts, nc)
# split unlabeled data
if len(unlabeled) > 0:
self._split_unlabeled(unlabeled, by_splits)
self._set_parts(by_splits)
|
py
|
1a5a1c8a8479a4e5178c766b6595561f75e46e86
|
from django.shortcuts import render
from django.utils.safestring import mark_safe
import json
from user.models import User
def room(request, room_name, user_name):
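# Renders the chat room page: serializes the room and user names to JSON
# (presumably for the client-side chat script), and passes the online/offline
# user lists plus the current user's record to the template context.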
print('****')
room_json = mark_safe(json.dumps(room_name))
user_json = mark_safe(json.dumps(user_name))
online = User.objects.filter(is_login=1)
offline = User.objects.filter(is_login=0)
data = User.objects.get(username=user_name)
return render(request, 'chat/room.html', {
'room_json': room_json,
'user_json': user_json,
'roomname': room_name,
'username': user_name,
'online': online,
'offline': offline,
'data': data,
})
|
py
|
1a5a1c9a6ae2ffca066f44fcdcce487675ce2038
|
# -*- coding: utf-8 -*-
__author__ = """Joanna Palewicz"""
__email__ = '[email protected]'
__version__ = '0.1.0'
|