np.mean(nll_M_var_val)] +
list(np.mean(np.array(kl_list_val), axis=0)) +
list(np.mean(np.array(kl_var_list_val), axis=0)) +
# list(np.mean(np.array(KLb_blocks_val),axis=0))
[np.mean(acc_var_val)] + list(np.mean(np.array(acc_var_blocks_val), axis=0))
)
perm_writer.writerow(list(np.mean(np.array(perm_train), axis=0)) +
list(np.mean(np.array(perm_val), axis=0))
)
log.flush()
if args.save_folder and np.mean(nll_M_val) < best_val_loss:
torch.save(encoder.state_dict(), encoder_file)
torch.save(decoder.state_dict(), decoder_file)
print('Best model so far, saving...')
return np.mean(nll_M_val)
def test():
t = time.time()
nll_test = []
nll_var_test = []
mse_1_test = []
mse_10_test = []
mse_20_test = []
kl_test = []
kl_list_test = []
kl_var_list_test = []
acc_test = []
acc_var_test = []
acc_blocks_test = []
acc_var_blocks_test = []
perm_test = []
KLb_test = []
KLb_blocks_test = [] # KL between blocks list
nll_M_test = []
nll_M_var_test = []
encoder.eval()
decoder.eval()
if not args.cuda:
encoder.load_state_dict(torch.load(encoder_file, map_location='cpu'))
decoder.load_state_dict(torch.load(decoder_file, map_location='cpu'))
else:
encoder.load_state_dict(torch.load(encoder_file))
decoder.load_state_dict(torch.load(decoder_file))
for batch_idx, (data, relations) in enumerate(test_loader):
with torch.no_grad():
if args.cuda:
data, relations = data.cuda(), relations.cuda()
assert (data.size(2) - args.timesteps) >= args.timesteps
data_encoder = data[:, :, :args.timesteps, :].contiguous()
data_decoder = data[:, :, -args.timesteps:, :].contiguous()
# logsigma stores the uncertainty values (log(sigma^2)). It is an array of size [batch_size, no. of particles, time, no. of axes] (isotropic = 1, anisotropic = 2).
# Initialise sigma to an array of large negative numbers, which become small positive numbers when passed through the softplus function.
logsigma = initlogsigma(len(data_decoder), len(data_decoder[0][0]), args.anisotropic, args.num_atoms, inversesoftplus(pow(args.var, 1/2), args.temp_softplus))
if args.cuda:
logsigma = logsigma.cuda()
# dim of logits, edges and prob are [batchsize, N^2-N, sum(edge_types_list)] where N = no. of particles
logits = encoder(data_encoder, rel_rec, rel_send)
if args.NRI:
edges = gumbel_softmax(logits, tau=args.temp, hard=args.hard)
prob = my_softmax(logits, -1)
loss_kl = kl_categorical_uniform(prob, args.num_atoms, edge_types)
loss_kl_split = [loss_kl]
loss_kl_var_split = [kl_categorical_uniform_var(prob, args.num_atoms, edge_types)]
KLb_test.append(0)
KLb_blocks_test.append([0])
acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_NRI(logits, relations, args.edge_types_list)
else:
logits_split = torch.split(logits, args.edge_types_list, dim=-1)
edges_split = tuple([gumbel_softmax(logits_i, tau=args.temp, hard=args.hard) for logits_i in logits_split])
edges = torch.cat(edges_split, dim=-1)
prob_split = [my_softmax(logits_i, -1) for logits_i in logits_split]
if args.prior:
loss_kl_split = [kl_categorical(prob_split[type_idx], log_prior[type_idx],
args.num_atoms) for type_idx in range(len(args.edge_types_list))]
loss_kl = sum(loss_kl_split)
else:
loss_kl_split = [kl_categorical_uniform(prob_split[type_idx], args.num_atoms,
args.edge_types_list[type_idx])
for type_idx in range(len(args.edge_types_list))]
loss_kl = sum(loss_kl_split)
loss_kl_var_split = [kl_categorical_uniform_var(prob_split[type_idx], args.num_atoms,
args.edge_types_list[type_idx])
for type_idx in range(len(args.edge_types_list))]
acc_perm, perm, acc_blocks, acc_var, acc_var_blocks = edge_accuracy_perm_fNRI(logits_split, relations,
args.edge_types_list, args.skip_first)
KLb_blocks = KL_between_blocks(prob_split, args.num_atoms)
KLb_test.append(sum(KLb_blocks).data.item())
KLb_blocks_test.append([KL.data.item() for KL in KLb_blocks])
if args.fixed_var:
target = data_decoder[:, :, 1:, :] # dimensions are [batch, particle, time, state]
output, logsigma, accel = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 1)
if args.plot:
import matplotlib.pyplot as plt
output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 49)
from trajectory_plot import draw_lines
if args.NRI:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
args.edge_types_list)
else:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
args.edge_types_list)
sigma_plot = torch.exp(logsigma_plot/2)
for i in range(args.batch_size):
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i, linestyle='-')
ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
ax.set_xticks([])
ax.set_yticks([])
block_names = [str(j) for j in range(len(args.edge_types_list))]
acc_text = ['layer ' + block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
for j in range(acc_blocks_batch.shape[1])]
acc_text = ', '.join(acc_text)
plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
plt.show()
loss_nll = nll_gaussian(output, target, args.var) # compute the reconstruction loss. nll_gaussian is from utils.py
loss_nll_var = nll_gaussian_var(output, target, args.var)
output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, args.prediction_steps)
loss_nll_M = nll_gaussian(output_M, target, args.var)
loss_nll_M_var = nll_gaussian_var(output_M, target, args.var)
perm_test.append(perm)
acc_test.append(acc_perm)
acc_blocks_test.append(acc_blocks)
acc_var_test.append(acc_var)
acc_var_blocks_test.append(acc_var_blocks)
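# Re-run the decoder with 10- and 20-step rollouts so the longer-horizon MSEs can be reported alongside the 1-step error.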
output_10, sigma_10, accel_10 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 10)
output_20, sigma_20, accel_20 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, False, False, args.temp_softplus, 20)
mse_1_test.append(F.mse_loss(output, target).data.item())
mse_10_test.append(F.mse_loss(output_10, target).data.item())
mse_20_test.append(F.mse_loss(output_20, target).data.item())
nll_test.append(loss_nll.data.item())
kl_test.append(loss_kl.data.item())
kl_list_test.append([kl_loss.data.item() for kl_loss in loss_kl_split])
nll_var_test.append(loss_nll_var.data.item())
kl_var_list_test.append([kl_var.data.item() for kl_var in loss_kl_var_split])
nll_M_test.append(loss_nll_M.data.item())
nll_M_var_test.append(loss_nll_M_var.data.item())
else:
if args.anisotropic:
target = data_decoder[:, :, 1:, :] # dimensions are [batch, particle, time, state]
output, logsigmaone, accelone = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 1)
if args.plot:
import matplotlib.pyplot as plt
output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 49)
output_plot_en, sigma_plot_en, accel_plot_en = decoder(data_encoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 49)
from trajectory_plot import draw_lines
if args.NRI:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
args.edge_types_list)
else:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
args.edge_types_list)
sigma_plot = torch.exp(logsigma_plot / 2)
from trajectory_plot import draw_lines
from matplotlib.patches import Ellipse
for i in range(args.batch_size):
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i,
linestyle='-')
indices_1 = torch.LongTensor([0, 1])
indices_2 = torch.LongTensor([2, 3])
indices_3 = torch.LongTensor([0])
if args.cuda:
indices_1, indices_2, indices_3 = indices_1.cuda(), indices_2.cuda(), indices_3.cuda()
positions = torch.index_select(output_plot, 3, indices_1)
ellipses = []
# plots the uncertainty ellipses for gaussian case.
# iterate through each of the atoms
# need to get the angles of the terms to be plotted:
velocities = torch.index_select(output_plot, 3, indices_2)
velnorm = velocities.norm(p=2, dim=3, keepdim=True)
normalisedvel = velocities.div(velnorm.expand_as(velocities))
# v||.x is just the first term of the tensor
normalisedvelx = torch.index_select(normalisedvel, 3, indices_3)
# angle of rotation is Theta = acos(v||.x) for normalised v|| and x (need angle in degrees not radians)
angle = torch.acos(normalisedvelx).squeeze() * 180 / 3.14159
for j in range(positions.size()[1]):
# get the first timestep component of (x,y) and angles
ellipses.append(
Ellipse((positions.tolist()[i][j][0][0], positions.tolist()[i][j][0][1]),
width=sigma_plot.tolist()[i][j][0][0],
height=sigma_plot.tolist()[i][j][0][1], angle=angle.tolist()[i][j][0]))
# plot only if Deltax^2 + Deltay^2 > 4*(DeltaSigmax^2 + DeltaSigmay^2), i.e. |Deltar| > 2*|DeltaSigma|; otherwise skip this timestep
for k in range(positions.size()[2] - 1):
deltar = (torch.from_numpy(positions.cpu().numpy()[i][j][k + 1]) - torch.from_numpy(
positions.cpu().numpy()[i][j][k])).norm(p=2, dim=0, keepdim=True)
deltasigma = (torch.from_numpy(sigma_plot.cpu().numpy()[i][j][k + 1])).norm(p=2,
dim=0,
keepdim=True)
if (deltar.item() > 2 * deltasigma.item()):
ellipses.append(
Ellipse((positions.tolist()[i][j][k + 1][0], positions[i][j][k + 1][1]),
width=sigma_plot.tolist()[i][j][k + 1][0],
height=sigma_plot.tolist()[i][j][k + 1][1],
angle=angle.tolist()[i][j][k + 1]))
fig1, ax1 = plt.subplots(subplot_kw={'aspect': 'equal'})
for e in ellipses:
ax1.add_artist(e)
e.set_clip_box(ax1.bbox)
e.set_alpha(0.6)
ax.set_xlim([min(xmin_t, xmin_o), max(xmax_t, xmax_o)])
ax.set_ylim([min(ymin_t, ymin_o), max(ymax_t, ymax_o)])
ax.set_xticks([])
ax.set_yticks([])
block_names = ['layer ' + str(j) for j in range(len(args.edge_types_list))]
# block_names = [ 'springs', 'charges' ]
acc_text = [block_names[j] + ' acc: {:02.0f}%'.format(100 * acc_blocks_batch[i, j])
for j in range(acc_blocks_batch.shape[1])]
acc_text = ', '.join(acc_text)
plt.text(0.5, 0.95, acc_text, horizontalalignment='center', transform=ax.transAxes)
# plt.savefig(os.path.join(args.load_folder,str(i)+'_pred_and_true.png'), dpi=300)
plt.show()
loss_nll, loss_1, loss_2 = nll_gaussian_multivariatesigma_efficient(output, target, logsigmaone, accelone) # compute the reconstruction loss. nll_gaussian is from utils.py
loss_nll_var = nll_gaussian_var_multivariatesigma_efficient(output, target, logsigmaone, accelone)
output_M, sigma_M, accel_M = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, args.prediction_steps)
loss_nll_M, loss_1_M, loss_2_M = nll_gaussian_multivariatesigma_efficient(output_M, target, sigma_M, accel_M)
loss_nll_M_var = nll_gaussian_var_multivariatesigma_efficient(output_M, target, sigma_M, accel_M)
perm_test.append(perm)
acc_test.append(acc_perm)
acc_blocks_test.append(acc_blocks)
acc_var_test.append(acc_var)
acc_var_blocks_test.append(acc_var_blocks)
output_10, sigma_10, accel_10 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 10)
output_20, sigma_20, accel_20 = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, True, args.temp_softplus, 20)
mse_1_test.append(F.mse_loss(output, target).data.item())
mse_10_test.append(F.mse_loss(output_10, target).data.item())
mse_20_test.append(F.mse_loss(output_20, target).data.item())
nll_test.append(loss_nll.data.item())
kl_test.append(loss_kl.data.item())
kl_list_test.append([kl_loss.data.item() for kl_loss in loss_kl_split])
nll_var_test.append(loss_nll_var.data.item())
kl_var_list_test.append([kl_var.data.item() for kl_var in loss_kl_var_split])
nll_M_test.append(loss_nll_M.data.item())
nll_M_var_test.append(loss_nll_M_var.data.item())
logsigma = logsigmaone
else:
target = data_decoder[:, :, 1:, :] # dimensions are [batch, particle, time, state]
output, logsigmaone, accelone = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 1)
if args.plot:
import matplotlib.pyplot as plt
output_plot, logsigma_plot, accel_plot = decoder(data_decoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 49)
output_plot_en, sigma_plot_en, accel_plot_en = decoder(data_encoder, edges, rel_rec, rel_send, logsigma, True, False, args.temp_softplus, 49)
from trajectory_plot import draw_lines
sigma_plot = torch.exp(logsigma_plot / 2)
if args.NRI:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_NRI_batch(logits, relations,
args.edge_types_list)
else:
acc_batch, perm, acc_blocks_batch = edge_accuracy_perm_fNRI_batch(logits_split, relations,
args.edge_types_list)
from trajectory_plot import draw_lines
from matplotlib.patches import Ellipse
for i in range(args.batch_size):
fig = plt.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
xmin_t, ymin_t, xmax_t, ymax_t = draw_lines(target, i, linestyle=':', alpha=0.6)
xmin_o, ymin_o, xmax_o, ymax_o = draw_lines(output_plot.detach().cpu().numpy(), i,
linestyle='-')
# isotropic therefore the ellipses become circles
indices = torch.LongTensor([0, 1])
if args.cuda:
indices = indices.cuda()
positions = torch.index_select(output_plot, 3, indices)
ellipses = []
# iterate through each of the atoms
for j in range(positions.size()[1]):
# get the first timestep component of (x,y)
ellipses.append(
Ellipse((positions.tolist()[i][j][0][0], positions.tolist()[i][j][0][1]),
width=sigma_plot.tolist()[i][j][0][0],
height=sigma_plot.tolist()[i][j][0][0], angle=0.0))
# plot only if Deltax^2 + Deltay^2 > 4*(DeltaSigmax^2 + DeltaSigmay^2), i.e. |Deltar| > 2*|DeltaSigma|; otherwise skip this timestep
for k in range(positions.size()[2] - 1):
deltar = (torch.from_numpy(positions.cpu().numpy()[i][j][k + 1]) - torch.from_numpy(
positions.cpu().numpy()[i][j][k])).norm(p=2, dim=0, keepdim=True)
deltasigma = (torch.from_numpy(sigma_plot.cpu().numpy()[i][j][k + 1])).norm(p=2,
dim=0,
keepdim=True)
if (deltar.item() > 2 * deltasigma.item()):
ellipses.append(
Ellipse((positions.tolist()[i][j][k + 1][0], positions.tolist()[i][j][k + 1][1]), width=sigma_plot.tolist()[i][j][k + 1][0], height=sigma_plot.tolist()[i][j][k + 1][0], angle=0.0))
import numpy as np
import matplotlib.pyplot as plt
import theano
# By convention, the tensor submodule is loaded as T
import theano.tensor as T
class Layer(object):
def __init__(self, W_init, b_init, activation):
'''
A layer of a neural network, computes s(Wx + b) where s is a nonlinearity and x is the input vector.
:parameters:
- W_init : np.ndarray, shape=(n_output, n_input)
Values to initialize the weight matrix to.
- b_init : np.ndarray, shape=(n_output,)
Values to initialize the bias vector
- activation : theano.tensor.elemwise.Elemwise
Activation function for layer output
'''
# Retrieve the input and output dimensionality based on W's initialization
n_output, n_input = W_init.shape
# Make sure b is n_output in size
assert b_init.shape == (n_output,)
# All parameters should be shared variables.
# They're used in this class to compute the layer output,
# but are updated elsewhere when optimizing the network parameters.
# Note that we are explicitly requiring that W_init has the theano.config.floatX dtype
self.W = theano.shared(value=W_init.astype(theano.config.floatX),
# The name parameter is solely for printing purposes
name='W',
# Setting borrow=True allows Theano to use user memory for this object.
# It can make code slightly faster by avoiding a deep copy on construction.
# For more details, see
# http://deeplearning.net/software/theano/tutorial/aliasing.html
borrow=True)
# We can force our bias vector b to be a column vector using numpy's reshape method.
# When b is a column vector, we can pass a matrix-shaped input to the layer
# and get a matrix-shaped output, thanks to broadcasting (described below)
self.b = theano.shared(value=b_init.reshape(-1, 1).astype(theano.config.floatX),
name='b',
borrow=True,
# Theano allows for broadcasting, similar to numpy.
# However, you need to explicitly denote which axes can be broadcasted.
# By setting broadcastable=(False, True), we are denoting that b
# can be broadcast (copied) along its second dimension in order to be
# added to another variable. For more information, see
# http://deeplearning.net/software/theano/library/tensor/basic.html
broadcastable=(False, True))
self.activation = activation
# We'll compute the gradient of the cost of the network with respect to the parameters in this list.
self.params = [self.W, self.b]
def output(self, x):
'''
Compute this layer's output given an input
:parameters:
- x : theano.tensor.var.TensorVariable
Theano symbolic variable for layer input
:returns:
- output : theano.tensor.var.TensorVariable
Mixed, biased, and activated x
'''
# Compute linear mix
lin_output = T.dot(self.W, x) + self.b
# Output is just linear mix if no activation function
# Otherwise, apply the activation function
return (lin_output if self.activation is None else self.activation(lin_output))
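# Quick illustrative check of a single Layer (this snippet is not part of the original
# tutorial; the _demo_* names are made up). A (3, 2) weight matrix maps 2-d inputs to
# 3-d outputs, and the column-vector bias broadcasts across a whole batch of points:
_demo_layer = Layer(np.random.randn(3, 2), np.zeros(3), T.nnet.sigmoid)
_demo_x = T.matrix('demo_x')
_demo_fn = theano.function([_demo_x], _demo_layer.output(_demo_x))
# Five 2-d points stacked as columns -> five 3-d outputs stacked as columns
print(_demo_fn(np.random.randn(2, 5).astype(theano.config.floatX)).shape)  # (3, 5)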
class MLP(object):
def __init__(self, W_init, b_init, activations):
'''
Multi-layer perceptron class, computes the composition of a sequence of Layers
:parameters:
- W_init : list of np.ndarray, len=N
Values to initialize the weight matrix in each layer to.
The layer sizes will be inferred from the shape of each matrix in W_init
- b_init : list of np.ndarray, len=N
Values to initialize the bias vector in each layer to
- activations : list of theano.tensor.elemwise.Elemwise, len=N
Activation function for layer output for each layer
'''
# Make sure the input lists are all of the same length
assert len(W_init) == len(b_init) == len(activations)
# Initialize lists of layers
self.layers = []
# Construct the layers
for W, b, activation in zip(W_init, b_init, activations):
self.layers.append(Layer(W, b, activation))
# Combine parameters from all layers
self.params = []
for layer in self.layers:
self.params += layer.params
def output(self, x):
'''
Compute the MLP's output given an input
:parameters:
- x : theano.tensor.var.TensorVariable
Theano symbolic variable for network input
:returns:
- output : theano.tensor.var.TensorVariable
x passed through the MLP
'''
# Recursively compute output
for layer in self.layers:
x = layer.output(x)
return x
def squared_error(self, x, y):
'''
Compute the squared euclidean error of the network output against the "true" output y
:parameters:
- x : theano.tensor.var.TensorVariable
Theano symbolic variable for network input
- y : theano.tensor.var.TensorVariable
Theano symbolic variable for desired network output
:returns:
- error : theano.tensor.var.TensorVariable
The squared Euclidean distance between the network output and y
'''
return T.sum((self.output(x) - y)**2)
def gradient_updates_momentum(cost, params, learning_rate, momentum):
'''
Compute updates for gradient descent with momentum
:parameters:
- cost : theano.tensor.var.TensorVariable
Theano cost function to minimize
- params : list of theano.tensor.var.TensorVariable
Parameters to compute gradient against
- learning_rate : float
Gradient descent learning rate
- momentum : float
Momentum parameter, should be at least 0 (standard gradient descent) and less than 1
:returns:
updates : list
List of updates, one for each parameter
'''
# Make sure momentum is a sane value
assert momentum < 1 and momentum >= 0
# List of update steps for each parameter
updates = []
# Just gradient descent on cost
for param in params:
# For each parameter, we'll create a param_update shared variable.
# This variable will keep track of the parameter's update step across iterations.
# We initialize it to 0
param_update = theano.shared(param.get_value()*0., broadcastable=param.broadcastable)
# Each parameter is updated by taking a step in the direction of the gradient.
# However, we also "mix in" the previous step according to the given momentum value.
# Note that when updating param_update, we are using its old value and also the new gradient step.
updates.append((param, param - learning_rate*param_update))
# Note that we don't need to derive backpropagation to compute updates - just use T.grad!
updates.append((param_update, momentum*param_update + (1. - momentum)*T.grad(cost, param)))
return updates
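# Illustrative sketch (plain arithmetic, not part of the tutorial) of what one pass of
# the updates above does. Theano applies all updates simultaneously, so `param` is moved
# using the *old* velocity, and only then is the velocity refreshed with the new gradient:
#
#   new_param        = param - learning_rate * param_update
#   new_param_update = momentum * param_update + (1 - momentum) * grad
#
# where `grad` stands for T.grad(cost, param) evaluated at the current parameter values.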
# Training data - two randomly-generated Gaussian-distributed clouds of points in 2d space
np.random.seed(0)
# Number of points
N = 1000
# Labels for each cluster
y = np.random.randint(0, 2, N)
# Mean of each cluster
means = np.array([[-1, 1], [-1, 1]])
# Covariance (in X and Y direction) of each cluster
covariances = np.random.random_sample((2, 2)) + 1
# Dimensions of each point
X = np.vstack([np.random.randn(N)*covariances[0, y] + means[0, y],
np.random.randn(N)*covariances[1, y] + means[1, y]])
# Plot the data
plt.figure(figsize=(8, 8))
plt.scatter(X[0, :], X[1, :], c=y, lw=.3, s=3, cmap=plt.cm.cool)
plt.axis([-6, 6, -6, 6])
plt.show()
# First, set the size of each layer (and the number of layers)
# Input layer size is training data dimensionality (2)
# Output size is just 1-d: class label - 0 or 1
# Finally, let the hidden layers be twice the size of the input.
# If we wanted more layers, we could just add another layer size to this list.
layer_sizes = [X.shape[0], X.shape[0]*2, 1]
# Set initial parameter values
W_init = []
b_init = []
activations = []
for n_input, n_output in zip(layer_sizes[:-1], layer_sizes[1:]):
# Getting the correct initialization matters a lot for non-toy problems.
# However, here we can just use the following initialization with success:
# Normally distribute initial weights
W_init.append(np.random.randn(n_output, n_input))
# Set initial biases to 1
b_init.append(np.ones(n_output))
# We'll use sigmoid activation for all layers
# Note that this doesn't make a ton of sense when using squared distance
# because the sigmoid function is bounded on [0, 1].
activations.append(T.nnet.sigmoid)
# Create an instance of the MLP class
mlp = MLP(W_init, b_init, activations)
# Create Theano variables for the MLP input
mlp_input = T.matrix('mlp_input')
# ... and the desired output
mlp_target = T.vector('mlp_target')
# Learning rate and momentum hyperparameter values
# Again, for non-toy problems these values can make a big difference
# as to whether the network (quickly) converges on a good local minimum.
learning_rate = 0.01
momentum = 0.9
# Create a function for computing the cost of the network given an input
cost = mlp.squared_error(mlp_input, mlp_target)
# Create a theano function for training the network
train = theano.function([mlp_input, mlp_target], cost,
updates=gradient_updates_momentum(cost, mlp.params, learning_rate, momentum))
# Create a theano function for computing the MLP's output given some input
mlp_output = theano.function([mlp_input], mlp.output(mlp_input))
# Keep track of the number of training iterations performed
iteration = 0
# We'll only train the network with 20 iterations.
# A more common technique is to use a hold-out validation set.
# When the validation error starts to increase, the network is overfitting,
# so we stop training the net. This is called "early stopping", which we won't do here.
max_iteration = 20
while iteration < max_iteration:
# Train the network using the entire training set.
# With large datasets, it's much more common to use stochastic or mini-batch gradient descent
# which only uses a subset of the data on each iteration.
train(X, y)
iteration += 1
"""Convert XFL edges to SVG paths.
If you just want to convert, use `xfl_edge_to_svg_path()`. If you're interested
in how everything works, read on.
"""
# Read these links first, as there is no official documentation for the XFL
# edge format:
#
# * https://github.com/SasQ/SavageFlask/blob/master/doc/FLA.txt
# * https://stackoverflow.com/a/4077709
#
# Overview:
#
# In Animate, graphic symbols are made of filled shapes and stroked paths.
# Both are defined by their outline, which Animate breaks into pieces. We'll
# call such a piece a "segment", rather than an "edge", to avoid confusion
# with the edge format.
#
# A segment may be part of up to two shapes: one on its left and one on its
# right. This is determined by the presence of the "fillStyle0" (left) and
# "fillStyle1" (right) attributes, which specify the style for the shape on
# that side.
#
# A segment may be part of up to one stroked path. This is determined by the
# presence of the "strokeStyle" attribute.
#
# So, to extract graphic symbols from XFL, we first convert the edge format
# into segments (represented as point lists, see below). Each <Edge> element
# produces one or more segments, each of which inherits the <Edge>'s
# "fillStyle0", "fillStyle1", and "strokeStyle" attributes.
#
# Then, for filled shapes, we join segments of the same fill style by
# matching their start/end points. The fill styles must be for the same
# side. For stroked paths, we just collect all segments of the same style.
#
# Finally, we convert segments to the SVG path format, put them in an SVG
# <path> element, and assign fill/stroke style attributes to the <path>.
from collections import defaultdict
import re
from typing import Iterator
import xml.etree.ElementTree as ET
# The XFL edge format can be described as follows:
#
# start : moveto (moveto | lineto | quadto)*
# moveto : "!" NUMBER ~ 2 select? // Move to this point
# lineto : ("|" | "/") NUMBER ~ 2 // Line from current point to here
# quadto : ("[" | "]") NUMBER ~ 4 // Quad Bézier (control point, dest)
# select : /S[1-7]/ // Only used by Animate
# NUMBER : /-?\d+(\.\d+)?/ // Decimal number
# | /#[A-Z0-9]{1,6}\.[A-Z0-9]{1,2}/ // Signed, 32-bit number in hex
# %import common.WS // Ignore whitespace
# %ignore WS
#
# Notes:
# * This grammar is written for use with Lark, a Python parsing toolkit. See:
# * Project page: https://github.com/lark-parser/lark
# * Try it online: https://www.lark-parser.org/ide/
# * The cubic commands are omitted:
# * They only appear in the "cubics" attribute and not in "edges"
# * They're just hints for Animate and aren't needed for conversion to SVG
# * "select" is also just a hint for Animate, but it appears in "edges", so we
# include it for completeness.
#
# Anyhow, this language can actually be tokenized with a single regex, which is
# faster than using Lark:
EDGE_TOKENIZER = re.compile(
r"""
[!|/[\]] | # Move to, line to, quad to
(?<!S)-?\d+(?:\.\d+)? | # Decimal number
\#[A-Z0-9]+\.[A-Z0-9]+ # Hex number
""",
re.VERBOSE,
)
# Notes:
# * Whitespace is automatically ignored, as we only match what we want.
# * The negative lookbehind assertion (?<!S) is needed to avoid matching the
# digit in select commands as a number.
# After tokenizing, we need to parse numbers:
def parse_number(num: str) -> float:
"""Parse an XFL edge format number."""
if num[0] == "#":
# Signed, 32-bit fixed-point number in hex
parts = num[1:].split(".")
# Pad to 8 digits
hex_num = "{:>06}{:<02}".format(*parts)
num = int.from_bytes(bytes.fromhex(hex_num), "big", signed=True)
# Account for hex scaling and Animate's 20x scaling (twips)
return (num / 256) / 20
else:
# Decimal number. Account for Animate's 20x scaling (twips)
return float(num) / 20
# Notes:
# * The <path>s produced by Animate's SVG export sometimes have slightly
# different numbers (e.g. flooring or subtracting 1 from decimals before
# dividing by 20). It's not clear how this works or if it's even intended,
# so I gave up trying to replicate it.
# * Animate prints round numbers as integers (e.g. "1" instead of "1.0"), but
# it makes no difference for SVG.
# Now, we can parse the edge format. To join segments into shapes, though, we
# will need a way to reverse segments (for normalizing them so that the filled
# shape is always on the left). That is, if we have a segment like:
#
# C
# / \
# | |
# A ----- B D ----- E
#
# which is represented by:
#
# moveto A, lineto B, quadto C D, lineto E
#
# We should be able to reverse it and get:
#
# moveto E, lineto D, quadto C B, lineto A
#
# The "point list" format (couldn't think of a better name) meets this
# requirement. The segment above would be represented as:
#
# [A, B, (C,), D, E]
#
# The first point is always the destination of a "move to" command. Subsequent
# points are the destinations of "line to" commands. If a point is in a tuple
# like `(C,)`, then it's the control point of a quadratic Bézier curve, and the
# following point is the destination of the curve. (Tuples are just an easy way
# to mark points--there's nothing particular about the choice.)
#
# With this format, we can see that reversing the list gives us the same
# segment, but in reverse:
#
# [E, D, (C,), B, A]
#
# In practice, each point is represented as a coordinate string, so the actual
# point list might look like:
#
# ["0 0", "10 0", ("20 10",), "30 0", "40 0"]
#
# This next function converts the XFL edge format into point lists. Since each
# "edges" attribute can contain multiple segments, but each point list only
# represents one segment, this function can yield multiple point lists.
def edge_format_to_point_lists(edges: str) -> Iterator[list]:
"""Convert the XFL edge format to point lists.
Args:
edges: The "edges" attribute of an XFL <Edge> element
Yields:
One point list for each segment parsed out of `edges`
"""
tokens = iter(EDGE_TOKENIZER.findall(edges))
def next_point():
return f"{parse_number(next(tokens))} {parse_number(next(tokens))}"
assert next(tokens) == "!", "Edge format must start with moveto (!) command"
prev_point = next_point()
point_list = [prev_point]
try:
while True:
command = next(tokens)
curr_point = next_point()
if command == "!":
# Move to
if curr_point != prev_point:
# If a move command doesn't change the current point, we
# ignore it. Otherwise, a new segment is starting, so we
# must yield the current point list and begin a new one.
yield point_list
point_list = [curr_point]
prev_point = curr_point
elif command in "|/":
# Line to
point_list.append(curr_point)
prev_point = curr_point
else:
# Quad to. The control point (curr_point) is marked by putting
# it in a tuple.
point_list.append((curr_point,))
prev_point = next_point()
point_list.append(prev_point)
except StopIteration:
yield point_list
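# Illustrative usage (the edge string is invented for this example): a closed 10x10
# square, with coordinates already converted from twips by parse_number():
#
#   >>> list(edge_format_to_point_lists("!0 0|200 0|200 200|0 200|0 0"))
#   [['0.0 0.0', '10.0 0.0', '10.0 10.0', '0.0 10.0', '0.0 0.0']]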
# The next function converts point lists into the SVG path format.
def point_list_to_path_format(point_list: list) -> str:
"""Convert a point list into the SVG path format."""
point_iter = iter(point_list)
path = ["M", next(point_iter)]
last_command = "M"
try:
while True:
point = next(point_iter)
command = "Q" if isinstance(point, tuple) else "L"
# SVG lets us omit the command letter if we use the same command
# multiple times in a row.
if command != last_command:
path.append(command)
last_command = command
if command == "Q":
# Append control point and destination point
path.append(point[0])
path.append(next(point_iter))
else:
path.append(point)
except StopIteration:
if point_list[0] == point_list[-1]:
# Animate adds a "closepath" (Z) command to every filled shape and
# closed stroke. For shapes, it makes no difference, but for closed
# strokes, it turns two overlapping line caps into a bevel, miter,
# or round join, which does make a difference.
# TODO: It is likely that closed strokes can be broken into
# segments and spread across multiple Edge elements, which would
# require a function like point_lists_to_shapes(), but for strokes.
# For now, though, adding "Z" to any stroke that is already closed
# seems good enough.
path.append("Z")
return " ".join(path)
# Finally, we can convert XFL <Edge> elements into SVG <path> elements. The
# algorithm works as follows:
# First, convert the "edges" attributes into segments. Then:
#
# For filled shapes:
# * For a given <Edge>, process each of its segments:
# * If the <Edge> has "fillStyle0", associate the segment with that fill style (the shape on the segment's left side); do the same for "fillStyle1" (right side).
#!/usr/bin/python
#-*- coding: utf-8 -*-
#Developer by Bafomet
import os
import time as t
import subprocess
from module.utils.banner import show_banner
WHSL = '\033[1;32m'
ENDL = '\033[0m'
REDL = '\033[0;31m'
GNSL = '\033[1;34m'
arrow = f" {REDL}└──>{WHSL}"
connect = f"{REDL}│{WHSL}"
page_1 = '''{0}
{1}Use in combination with the {0}Metasploit framework.
{1}Satana sploit 2.0
{1}[ {0}1{1} ] {2} Show connected devices. {1}[ {0}10{1} ] {2} Reboot the device. {1}[ {0}19{1} ] {2} Extract an apk from an app.
{1}[ {0}2{1} ] {2} Disconnect all devices. {1}[ {0}11{1} ] {2} Uninstall an application. {1}[ {0}20{1} ] {0} Get Battery Status.
{1}[ {0}3{1} ] {2} Connect a new device. {1}[ {0}12{1} ] {2} Show the device log. {1}[ {0}21{1} ] {0} Get Network Status.
{1}[ {0}4{1} ] {2} Access via {0}shell. {1}[ {0}13{1} ] {2} Dump {0}System Info. {1}[ {0}22{1} ] {2} Turn on / off {0}Wi-Fi.
{1}[ {0}5{1} ] {2} Install an{0} apk{2} on the device. {1}[ {0}14{1} ] {2} List all applications. {1}[ {0}23{1} ] {2} Remove the device password.
{1}[ {0}6{1} ] {2} Record a video of the screen. {1}[ {0}15{1} ] {2} Launch an application. {1}[ {0}24{1} ] {2} Emulate key presses.
{1}[ {0}7{1} ] {2} Get a {0}screenshot. {1}[ {0}16{1} ] {2} Port Forwarding. {1}[ {0}25{1} ] {2} Get the current activity (logs).
{1}[ {0}8{1} ] {2} Restart your{0} server. {1}[ {0}17{1} ] {0} Grab wpa_supplicant. {1}[ {0}26{1} ] {2} Connect devices in bulk.
{1}[ {0}9{1} ] {2} Pull files from the device. {1}[ {0}18{1} ] {0} Show Mac/Inet.
{1}[ {0}99{1} ] {2}Exit and stop the{0} adb server. {1}[ {0}66{1} ]{2} Clear the screen. {1}[ {0}88{1} ]{2} Stop the{0} server.
'''.format(GNSL, REDL, WHSL)
def android_debug():
os.system("printf '\033]2;OSINT SAN 3.5\a'")
print()
print("{1} [ {0}+{1} ]{2} Запуск ADB сервера...".format(REDL, GNSL, WHSL))
print()
print("{1} [ {0}+{1} ]{2} Подождите 5 секунд.".format(REDL, GNSL, WHSL))
print()
subprocess.call("adb tcpip 5555 >> /dev/null", shell=True)
show_banner(clear=True)
print(page_1)
device_name = None
while True:
try:
option = input(
f"{REDL} └──>{ENDL} Android Debug Bridge {GNSL}[{REDL} main_menu {GNSL}]{ENDL}:")
except KeyboardInterrupt:
return
if option == '1':
if not device_name:
subprocess.call("adb devices -l", shell=True)
else:
subprocess.call("adb devices -l", shell=True)
elif option == '2':
if not device_name:
print("{1} [{0} + {1}]{2} Нет подключенных устройств к серверу.\n".format(REDL, GNSL, WHSL))
else:
subprocess.call("adb disconnect", shell=True)
elif option == '3':
print("\n {1}[{0} + {1}]{2} Введите IP address.\n".format(REDL, GNSL, WHSL))
try:
device_name = input(f"{arrow} Android Debug Bridge {GNSL}[{REDL} connect_device {GNSL}]{ENDL}:")
except KeyboardInterrupt:
continue
if device_name == '':
continue
if device_name == '27':
continue
subprocess.call(f"adb connect {device_name}:5555", shell=True)
elif option == '4':
if not device_name:
print("{1} [{0} + {1}]{2} Нет подключенных устройств.".format(REDL, GNSL, WHSL))
else:
subprocess.call(f"adb -s {device_name} shell", shell=True)
elif option == '5':
if not device_name:
print("{1}[{0}+{1}]{2} Нет подключенных устройств.".format(REDL, GNSL, WHSL))
else:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите локацию apk. Пример /home/salita/Desktop/test.apk.\n".format(REDL, GNSL, WHSL))
apk_location = input(f" {arrow} Android Debug Bridge{GNSL}[{REDL} apk_install {GNSL}]{ENDL}:")
subprocess.call("adb -s "+device_name+" install "+apk_location, shell=True)
print(f" {GNSL}Apk был установлен.")
elif option == '6':
if not device_name:
print("{1}[{0}+{1}]{2} Нет подключенных устройств.".format(REDL, GNSL, WHSL))
else:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Запись видео началась.".format(REDL, GNSL, WHSL))
print(f" {connect}")
print(" {2}Нажми{1} ctrl+c{2} для остановки записи.\n".format(REDL, GNSL, WHSL))
os.system(f"adb -s {device_name} shell screenrecord /sdcard/screen.mp4")
print(" {1}[{0} + {1}]{2} Укажите, где вы хотите сохранить видео.\n".format(REDL, GNSL, WHSL))
print(" {2} Пример:{1} /home/apashe/sc.mp4\n".format(REDL, GNSL, WHSL))
place_location = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL}screen_record{GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} pull /sdcard/screen.mp4 {place_location}")
print(" {0} Видео {2}успешно загружено.".format(REDL, GNSL, WHSL))
t.sleep(4)
show_banner(clear=True)
print(page_1)
elif option == '7':
if not device_name:
print("{1}[{0}+{1}]{2} Нет подключенных устройств.".format(REDL, GNSL, WHSL))
else:
os.system(f"adb -s {device_name} shell screencap /sdcard/screen.png")
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите, где вы хотите сохранить снимок экрана.\n".format(REDL, GNSL, WHSL))
print(" {1}Пример:{0} /home/apashe/screen.png\n".format(REDL, GNSL, WHSL))
place_location = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL} screenshot {GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} pull /sdcard/screen.png {place_location}")
print(" {0} Скриншот {2}успешно загружен.".format(REDL, GNSL, WHSL))
t.sleep(4)
show_banner(clear=True)
print(page_1)
elif option == '8':
print("")
print("{1} [{0} + {1}]{2} Перезапуск сервера ADB...{3}\n".format(REDL, GNSL, WHSL, ENDL))
os.system("adb disconnect >> /dev/null")
os.system("adb kill-server >> /dev/null")
os.system("adb start-server >> /dev/null")
print(" {0} Сервер успешно перезагружен".format(REDL, GNSL, WHSL, ENDL))
t.sleep(4)
show_banner(clear=True)
print(page_1)
elif option == '9':
if not device_name:
print("{1}[{0}+{1}]{2} Устройства еще не подключены.".format(REDL, GNSL, WHSL))
else:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите местоположение файла на устройстве.\n".format(REDL, GNSL, WHSL))
print(" {1}[{0} + {1}]{2} Пример:{0} /sdcard/DCIM/demo.mp4 \n".format(REDL, GNSL, WHSL))
file_location = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL} file_pull {GNSL}]{ENDL}:")
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Пример:{0} /home/apashe/Desktop\n".format(REDL, GNSL, WHSL))
print(" {1}[{0} + {1}]{2} Введите, где вы хотите сохранить файл.\n".format(REDL, GNSL, WHSL))
place_location = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL} file_pull {GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} pull {file_location} {place_location}")
print(page_1)
elif option == '10':
if not device_name:
print("{1}[{0}+{1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
os.system(f"adb -s {device_name} reboot ")
print(" {2}Устройство будет перезагруженно, ожидайте 1 минуту прежде чем вновь повторить подключение.".format(REDL, GNSL, WHSL))
t.sleep(4)
show_banner(clear=True)
print(page_1)
elif option == '11':
if not device_name:
print("{1}[{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите название pack_name \n".format(REDL, GNSL, WHSL))
package_name = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL} app_delete {GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} uninstall {package_name}")
elif option == '12':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
print("\n {2}Нажми{1} ctrl+c{2} для остановки.{0}\n".format(REDL, GNSL, WHSL))
t.sleep(4)
os.system(f'adb -s {device_name} logcat ')
elif option == '13':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
print("\n {2}Нажми{1} ctrl+c{2} для остановки.{0}\n".format(REDL, GNSL, WHSL))
t.sleep(4)
os.system(f"adb -s {device_name} shell dumpsys")
elif option == '14':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
os.system(f"adb -s {device_name} shell pm list packages -f")
elif option == '15':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите название приложения.\n".format(REDL, GNSL, WHSL))
package_name = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL} Запуск приложения {GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} shell monkey -p "+package_name+" -v 500")
elif option == '16':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите порт на устройстве.\n".format(REDL, GNSL, WHSL))
port_device = input(" " +arrow + " Android Debug Bridge"+GNSL+"["+REDL + " port_forward " + GNSL + "]"+ENDL + ":")
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите порт для пересылки.\n".format(REDL, GNSL, WHSL))
forward_port = input(" "+arrow + " Android Debug Bridge "+GNSL+"["+REDL + " port_forward " + GNSL + "]"+ENDL + ":")
os.system(f"adb -s {device_name} forward tcp:"+port_device+" tcp:"+forward_port)
elif option == '17':
try:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите, где вы хотите сохранить файл.\n".format(REDL, GNSL, WHSL))
location = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL} wpa_grub {GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} shell su -c 'cp /data/misc/wifi/wpa_supplicant.conf /sdcard/'")
os.system(f"adb -s {device_name} pull /sdcard/wpa_supplicant.conf {location}")
except KeyboardInterrupt:
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
elif option == '18':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
os.system(f"adb -s {device_name} shell ip address show wlan0")
elif option == '19':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите название apk.\n".format(REDL, GNSL, WHSL))
package_name = input(" "+arrow + " Android Debug Bridge "+GNSL+"["+REDL + " pull_apk " + GNSL + "]"+ENDL + ":")
os.system(f"adb -s {device_name} shell pm path "+package_name)
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите путь к apk на устойстве.\n".format(REDL, GNSL, WHSL))
path = input(" "+arrow + " Android Debug Bridge"+GNSL+"["+REDL + " pull_apk " + GNSL + "]"+ENDL + ":")
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Введите место хранения apk.\n".format(REDL, GNSL, WHSL))
location = input(f" {arrow} Android Debug Bridge {GNSL}[{REDL}pull_apk{GNSL}]{ENDL}:")
os.system(f"adb -s {device_name} pull {path} {location}")
t.sleep(5)
show_banner(clear=True)
print(page_1)
elif option == '20':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
os.system(f"adb -s {device_name} shell dumpsys battery")
elif option == '21':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
os.system(f"adb -s {device_name} shell netstat")
elif option == '22':
if not device_name:
print("{1} [{0} + {1}]{2} Устройства не подключены.".format(REDL, GNSL, WHSL))
else:
print(f" {connect}")
print(" {1}[{0} + {1}]{2} Чтобы | |
of values as JobState.
"""
pulumi.set(__self__, "current_state_time", current_state_time)
pulumi.set(__self__, "execution_stage_name", execution_stage_name)
pulumi.set(__self__, "execution_stage_state", execution_stage_state)
@property
@pulumi.getter(name="currentStateTime")
def current_state_time(self) -> str:
"""
The time at which the stage transitioned to this state.
"""
return pulumi.get(self, "current_state_time")
@property
@pulumi.getter(name="executionStageName")
def execution_stage_name(self) -> str:
"""
The name of the execution stage.
"""
return pulumi.get(self, "execution_stage_name")
@property
@pulumi.getter(name="executionStageState")
def execution_stage_state(self) -> str:
"""
Executions stage states allow the same set of values as JobState.
"""
return pulumi.get(self, "execution_stage_state")
@pulumi.output_type
class ExecutionStageSummaryResponse(dict):
"""
Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "componentSource":
suggest = "component_source"
elif key == "componentTransform":
suggest = "component_transform"
elif key == "inputSource":
suggest = "input_source"
elif key == "outputSource":
suggest = "output_source"
elif key == "prerequisiteStage":
suggest = "prerequisite_stage"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ExecutionStageSummaryResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ExecutionStageSummaryResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ExecutionStageSummaryResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
component_source: Sequence['outputs.ComponentSourceResponse'],
component_transform: Sequence['outputs.ComponentTransformResponse'],
input_source: Sequence['outputs.StageSourceResponse'],
kind: str,
name: str,
output_source: Sequence['outputs.StageSourceResponse'],
prerequisite_stage: Sequence[str]):
"""
Description of the composing transforms, names/ids, and input/outputs of a stage of execution. Some composing transforms and sources may have been generated by the Dataflow service during execution planning.
:param Sequence['ComponentSourceResponse'] component_source: Collections produced and consumed by component transforms of this stage.
:param Sequence['ComponentTransformResponse'] component_transform: Transforms that comprise this execution stage.
:param Sequence['StageSourceResponse'] input_source: Input sources for this stage.
:param str kind: Type of transform this stage is executing.
:param str name: Dataflow service generated name for this stage.
:param Sequence['StageSourceResponse'] output_source: Output sources for this stage.
:param Sequence[str] prerequisite_stage: Other stages that must complete before this stage can run.
"""
pulumi.set(__self__, "component_source", component_source)
pulumi.set(__self__, "component_transform", component_transform)
pulumi.set(__self__, "input_source", input_source)
pulumi.set(__self__, "kind", kind)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "output_source", output_source)
pulumi.set(__self__, "prerequisite_stage", prerequisite_stage)
@property
@pulumi.getter(name="componentSource")
def component_source(self) -> Sequence['outputs.ComponentSourceResponse']:
"""
Collections produced and consumed by component transforms of this stage.
"""
return pulumi.get(self, "component_source")
@property
@pulumi.getter(name="componentTransform")
def component_transform(self) -> Sequence['outputs.ComponentTransformResponse']:
"""
Transforms that comprise this execution stage.
"""
return pulumi.get(self, "component_transform")
@property
@pulumi.getter(name="inputSource")
def input_source(self) -> Sequence['outputs.StageSourceResponse']:
"""
Input sources for this stage.
"""
return pulumi.get(self, "input_source")
@property
@pulumi.getter
def kind(self) -> str:
"""
Type of transform this stage is executing.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Dataflow service generated name for this stage.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outputSource")
def output_source(self) -> Sequence['outputs.StageSourceResponse']:
"""
Output sources for this stage.
"""
return pulumi.get(self, "output_source")
@property
@pulumi.getter(name="prerequisiteStage")
def prerequisite_stage(self) -> Sequence[str]:
"""
Other stages that must complete before this stage can run.
"""
return pulumi.get(self, "prerequisite_stage")
@pulumi.output_type
class FileIODetailsResponse(dict):
"""
Metadata for a File connector used by the job.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "filePattern":
suggest = "file_pattern"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in FileIODetailsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
FileIODetailsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
FileIODetailsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
file_pattern: str):
"""
Metadata for a File connector used by the job.
:param str file_pattern: File Pattern used to access files by the connector.
"""
pulumi.set(__self__, "file_pattern", file_pattern)
@property
@pulumi.getter(name="filePattern")
def file_pattern(self) -> str:
"""
File Pattern used to access files by the connector.
"""
return pulumi.get(self, "file_pattern")
@pulumi.output_type
class JobMetadataResponse(dict):
"""
Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bigTableDetails":
suggest = "big_table_details"
elif key == "bigqueryDetails":
suggest = "bigquery_details"
elif key == "datastoreDetails":
suggest = "datastore_details"
elif key == "fileDetails":
suggest = "file_details"
elif key == "pubsubDetails":
suggest = "pubsub_details"
elif key == "sdkVersion":
suggest = "sdk_version"
elif key == "spannerDetails":
suggest = "spanner_details"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in JobMetadataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
JobMetadataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
JobMetadataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
big_table_details: Sequence['outputs.BigTableIODetailsResponse'],
bigquery_details: Sequence['outputs.BigQueryIODetailsResponse'],
datastore_details: Sequence['outputs.DatastoreIODetailsResponse'],
file_details: Sequence['outputs.FileIODetailsResponse'],
pubsub_details: Sequence['outputs.PubSubIODetailsResponse'],
sdk_version: 'outputs.SdkVersionResponse',
spanner_details: Sequence['outputs.SpannerIODetailsResponse']):
"""
Metadata available primarily for filtering jobs. Will be included in the ListJob response and Job SUMMARY view.
:param Sequence['BigTableIODetailsResponse'] big_table_details: Identification of a Cloud Bigtable source used in the Dataflow job.
:param Sequence['BigQueryIODetailsResponse'] bigquery_details: Identification of a BigQuery source used in the Dataflow job.
:param Sequence['DatastoreIODetailsResponse'] datastore_details: Identification of a Datastore source used in the Dataflow job.
:param Sequence['FileIODetailsResponse'] file_details: Identification of a File source used in the Dataflow job.
:param Sequence['PubSubIODetailsResponse'] pubsub_details: Identification of a Pub/Sub source used in the Dataflow job.
:param 'SdkVersionResponse' sdk_version: The SDK version used to run the job.
:param Sequence['SpannerIODetailsResponse'] spanner_details: Identification of a Spanner source used in the Dataflow job.
"""
pulumi.set(__self__, "big_table_details", big_table_details)
pulumi.set(__self__, "bigquery_details", bigquery_details)
pulumi.set(__self__, "datastore_details", datastore_details)
pulumi.set(__self__, "file_details", file_details)
pulumi.set(__self__, "pubsub_details", pubsub_details)
pulumi.set(__self__, "sdk_version", sdk_version)
pulumi.set(__self__, "spanner_details", spanner_details)
@property
@pulumi.getter(name="bigTableDetails")
def big_table_details(self) -> Sequence['outputs.BigTableIODetailsResponse']:
"""
Identification of a Cloud Bigtable source used in the Dataflow job.
"""
return pulumi.get(self, "big_table_details")
@property
@pulumi.getter(name="bigqueryDetails")
def bigquery_details(self) -> Sequence['outputs.BigQueryIODetailsResponse']:
"""
Identification of a BigQuery source used in the Dataflow job.
"""
return pulumi.get(self, "bigquery_details")
@property
@pulumi.getter(name="datastoreDetails")
def datastore_details(self) -> Sequence['outputs.DatastoreIODetailsResponse']:
"""
Identification of a Datastore source used in the Dataflow job.
"""
return pulumi.get(self, "datastore_details")
@property
@pulumi.getter(name="fileDetails")
def file_details(self) -> Sequence['outputs.FileIODetailsResponse']:
"""
Identification of a File source used in the Dataflow job.
"""
return pulumi.get(self, "file_details")
@property
@pulumi.getter(name="pubsubDetails")
def pubsub_details(self) -> Sequence['outputs.PubSubIODetailsResponse']:
"""
Identification of a Pub/Sub source used in the Dataflow job.
"""
return pulumi.get(self, "pubsub_details")
@property
@pulumi.getter(name="sdkVersion")
def sdk_version(self) -> 'outputs.SdkVersionResponse':
"""
The SDK version used to run the job.
"""
return pulumi.get(self, "sdk_version")
@property
@pulumi.getter(name="spannerDetails")
def spanner_details(self) -> Sequence['outputs.SpannerIODetailsResponse']:
"""
Identification of a Spanner source used in the Dataflow job.
"""
return pulumi.get(self, "spanner_details")
@pulumi.output_type
class PackageResponse(dict):
"""
The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.
"""
def __init__(__self__, *,
location: str,
name: str):
"""
The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.
:param str location: The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
:param str name: The name of the package.
"""
pulumi.set(__self__, "location", location)
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def location(self) -> str:
"""
The resource to read the package from. The supported resource type is: Google Cloud Storage: storage.googleapis.com/{bucket} bucket.storage.googleapis.com/
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the package.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class ParameterMetadataResponse(dict):
"""
Metadata for a specific parameter.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "customMetadata":
suggest = "custom_metadata"
elif key == "helpText":
suggest = "help_text"
elif key == "isOptional":
suggest = "is_optional"
elif key == "paramType":
suggest = "param_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in | |
<filename>pyiron_atomistics/atomistics/structure/analyse.py
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
from sklearn.cluster import AgglomerativeClustering, DBSCAN
from scipy.sparse import coo_matrix
from scipy.spatial import Voronoi, Delaunay
from pyiron_atomistics.atomistics.structure.pyscal import (
get_steinhardt_parameter_structure,
analyse_cna_adaptive,
analyse_centro_symmetry,
analyse_diamond_structure,
analyse_voronoi_volume,
analyse_find_solids,
)
from pyiron_atomistics.atomistics.structure.strain import Strain
from pyiron_base.generic.util import Deprecator
from scipy.spatial import ConvexHull
deprecate = Deprecator()
__author__ = "<NAME>, <NAME>"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
def get_mean_positions(positions, cell, pbc, labels):
"""
This function calculates the average position(-s) across periodic boundary conditions according
to the labels
Args:
positions (numpy.ndarray (n, 3)): Coordinates to be averaged
cell (numpy.ndarray (3, 3)): Cell dimensions
pbc (numpy.ndarray (3,)): Periodic boundary conditions (in boolean)
labels (numpy.ndarray (n,)): labels according to which the atoms are grouped
Returns:
(numpy.ndarray): mean positions
"""
# Translate labels to integer enumeration (0, 1, 2, ... etc.) and get their counts
_, labels, counts = np.unique(labels, return_inverse=True, return_counts=True)
# Get reference point for each unique label
mean_positions = positions[np.unique(labels, return_index=True)[1]]
# Get displacement vectors from reference points to all other points for the same labels
all_positions = positions - mean_positions[labels]
# Account for pbc
all_positions = np.einsum("ji,nj->ni", np.linalg.inv(cell), all_positions)
all_positions[:, pbc] -= np.rint(all_positions)[:, pbc]
all_positions = np.einsum("ji,nj->ni", cell, all_positions)
# Add average displacement vector of each label to the reference point
np.add.at(mean_positions, labels, (all_positions.T / counts[labels]).T)
return mean_positions
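# A minimal illustrative check of the wrapping logic above (values are
# assumptions): two atoms sharing one label at 0.1 and 3.9 along x in a
# 4 Angstrom cubic cell with full pbc average to ~0.0 (wrapped across the
# boundary) rather than to 2.0.
#
# cell = np.eye(3) * 4.0
# positions = np.array([[0.1, 0.0, 0.0], [3.9, 0.0, 0.0]])
# get_mean_positions(positions, cell, np.array([True, True, True]), [0, 0])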
def get_average_of_unique_labels(labels, values):
"""
This function returns the average values of those elements, which share the same labels
Example:
>>> labels = [0, 1, 0, 2]
>>> values = [0, 1, 2, 3]
>>> print(get_average_of_unique_labels(labels, values))
array([1, 1, 3])
"""
labels = np.unique(labels, return_inverse=True)[1]
unique_labels = np.unique(labels)
mat = coo_matrix((np.ones_like(labels), (labels, np.arange(len(labels)))))
mean_values = np.asarray(
mat.dot(np.asarray(values).reshape(len(labels), -1)) / mat.sum(axis=1)
)
if np.prod(mean_values.shape).astype(int) == len(unique_labels):
return mean_values.flatten()
return mean_values
class Interstitials:
"""
Class to search for interstitial sites
This class internally does the following steps:
1. Initialize grid points (or Voronoi vertices) which are considered as
interstitial site candidates.
2. Eliminate points within a distance from the nearest neighboring atoms as
given by `min_distance`
3. Initialize neighbor environment using `get_neighbors`
4. Shift interstitial candidates to the nearest symmetric points with respect to the
neighboring atom sites/vertices.
5. Kick out points with large neighbor distance variances; this eliminates "irregular"
shaped interstitials
6. Cluster interstitial candidates to avoid point overlapping.
The interstitial sites can be obtained via `positions`
    In complex structures (e.g. grain boundaries, dislocations etc.), the default parameters
should be chosen properly. In order to see other quantities, which potentially
characterize interstitial sites, see the following class methods:
- `get_variances()`
- `get_distances()`
- `get_steinhardt_parameters()`
- `get_volumes()`
- `get_areas()`
"""
def __init__(
self,
structure,
num_neighbors,
n_gridpoints_per_angstrom=5,
min_distance=1,
use_voronoi=False,
variance_buffer=0.01,
n_iterations=2,
eps=0.1,
):
"""
        Args:
            structure (pyiron.atomistics.structure.atoms.Atoms): Structure in which the
                interstitial sites are searched.
num_neighbors (int): Number of neighbors/vertices to consider for the interstitial
sites. By definition, tetrahedral sites should have 4 vertices and octahedral
sites 6.
n_gridpoints_per_angstrom (int): Number of grid points per angstrom for the
initialization of the interstitial candidates. The finer the mesh (i.e. the larger
the value), the likelier it is to find the correct sites but then also it becomes
computationally more expensive. Ignored if `use_voronoi` is set to `True`
min_distance (float): Minimum distance from the nearest neighboring atoms to the
positions for them to be considered as interstitial site candidates. Set
`min_distance` to 0 if no point should be removed.
use_voronoi (bool): Use Voronoi vertices for the initial interstitial candidate
positions instead of grid points.
            variance_buffer (float): Maximum permitted variance value (in distance units) of the
neighbor distance values with respect to the minimum value found for each point.
It should be close to 0 for perfect crystals and slightly higher values for
structures containing defects. Set `variance_buffer` to `numpy.inf` if no selection
by variance value should take place.
n_iterations (int): Number of iterations for the shifting of the candidate positions
to the nearest symmetric positions with respect to `num_neighbors`. In most of the
cases, 1 is enough. In some rare cases (notably tetrahedral sites in bcc), it
should be at least 2. It is unlikely that it has to be larger than 2. Set
`n_iterations` to 0 if no shifting should take place.
            eps (float): Distance below which two interstitial candidate sites are to be considered as
one site after the symmetrization of the points. Set `eps` to 0 if clustering should
not be done.
"""
self._hull = None
self._neigh = None
self._positions = None
self.num_neighbors = num_neighbors
self.structure = structure
self._initialize(
n_gridpoints_per_angstrom=n_gridpoints_per_angstrom,
min_distance=min_distance,
use_voronoi=use_voronoi,
variance_buffer=variance_buffer,
n_iterations=n_iterations,
eps=eps,
)
def _initialize(
self,
n_gridpoints_per_angstrom=5,
min_distance=1,
use_voronoi=False,
variance_buffer=0.01,
n_iterations=2,
eps=0.1,
):
if use_voronoi:
self.positions = self.structure.analyse.get_voronoi_vertices()
else:
self.positions = self._create_gridpoints(
n_gridpoints_per_angstrom=n_gridpoints_per_angstrom
)
self._remove_too_close(min_distance=min_distance)
for _ in range(n_iterations):
self._set_interstitials_to_high_symmetry_points()
self._kick_out_points(variance_buffer=variance_buffer)
self._cluster_points(eps=eps)
@property
def num_neighbors(self):
"""
Number of atoms (vertices) to consider for each interstitial atom. By definition,
tetrahedral sites should have 4 and octahedral sites 6.
"""
return self._num_neighbors
@num_neighbors.setter
def num_neighbors(self, n):
self.reset()
self._num_neighbors = n
def reset(self):
self._hull = None
self._neigh = None
@property
def neigh(self):
"""
Neighborhood information of each interstitial candidate and their surrounding atoms. E.g.
`class.neigh.distances[0][0]` gives the distance from the first interstitial candidate to
its nearest neighboring atoms. The functionalities of `neigh` follow those of
`pyiron_atomistics.structure.atoms.neighbors`.
"""
if self._neigh is None:
self._neigh = self.structure.get_neighborhood(
self.positions, num_neighbors=self.num_neighbors
)
return self._neigh
@property
def positions(self):
"""
Positions of the interstitial candidates (and not those of the atoms).
IMPORTANT: Do not set positions via numpy setter, i.e.
BAD:
```
>>> Interstitials.neigh.positions[0][0] = x
```
GOOD:
```
>>> positions = Interstitials.neigh.positions
>>> positions[0][0] = x
        >>> Interstitials.neigh.positions = positions
```
This is because in the first case related properties (most importantly the neighborhood
        information) are not updated, which might lead to inconsistencies.
"""
return self._positions
@positions.setter
def positions(self, x):
self.reset()
self._positions = x
@property
def hull(self):
"""
        Convex hull spanned by the neighbor vectors of each interstitial candidate. It is mainly
        used for the volume and area calculation of each candidate site. For more info, see
        `get_volumes` and `get_areas`.
"""
if self._hull is None:
self._hull = [ConvexHull(v) for v in self.neigh.vecs]
return self._hull
def _create_gridpoints(self, n_gridpoints_per_angstrom=5):
cell = self.structure.get_vertical_length()
n_points = (n_gridpoints_per_angstrom * cell).astype(int)
positions = np.meshgrid(
*[np.linspace(0, 1, n_points[i], endpoint=False) for i in range(3)]
)
positions = np.stack(positions, axis=-1).reshape(-1, 3)
return np.einsum("ji,nj->ni", self.structure.cell, positions)
def _remove_too_close(self, min_distance=1):
neigh = self.structure.get_neighborhood(self.positions, num_neighbors=1)
self.positions = self.positions[neigh.distances.flatten() > min_distance]
def _set_interstitials_to_high_symmetry_points(self):
self.positions = self.positions + np.mean(self.neigh.vecs, axis=-2)
self.positions = self.structure.get_wrapped_coordinates(self.positions)
def _kick_out_points(self, variance_buffer=0.01):
variance = self.get_variances()
min_var = variance.min()
self.positions = self.positions[variance < min_var + variance_buffer]
def _cluster_points(self, eps=0.1):
if eps == 0:
return
extended_positions, indices = self.structure.get_extended_positions(
eps, return_indices=True, positions=self.positions
)
labels = DBSCAN(eps=eps, min_samples=1).fit_predict(extended_positions)
coo = coo_matrix((labels, (np.arange(len(labels)), indices)))
labels = coo.max(axis=0).toarray().flatten()
self.positions = get_mean_positions(
self.positions, self.structure.cell, self.structure.pbc, labels
)
def get_variances(self):
"""
Get variance of neighboring distances. Since interstitial sites are mostly in symmetric
sites, the variance values tend to be small. In the case of fcc, both tetrahedral and
octahedral sites as well as tetrahedral sites in bcc should have the value of 0.
Returns:
(numpy.array (n,)) Variance values
"""
return np.std(self.neigh.distances, axis=-1)
def get_distances(self, function_to_apply=np.min):
"""
Get per-position return values of a given function for the neighbors.
Args:
function_to_apply (function): Function to apply to the distance array. Default is
                numpy.min
Returns:
(numpy.array (n,)) Function values on the distance array
"""
return function_to_apply(self.neigh.distances, axis=-1)
def get_steinhardt_parameters(self, l):
"""
Args:
l (int/numpy.array): Order of Steinhardt parameter
Returns:
(numpy.array (n,)) Steinhardt parameter values
"""
return self.neigh.get_steinhardt_parameter(l=l)
def get_volumes(self):
"""
Returns:
(numpy.array (n,)): Convex hull volume of each site.
"""
return np.array([h.volume for h in self.hull])
def get_areas(self):
"""
Returns:
(numpy.array (n,)): Convex hull area of each site.
"""
return np.array([h.area for h in self.hull])
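# Illustrative usage sketch of the Interstitials workflow described in the class
# docstring (structure creation and parameter values are assumptions; any pyiron
# Atoms object works):
#
# from pyiron_atomistics import Project
# pr = Project('interstitial_demo')
# bulk = pr.create.structure.bulk('Al', cubic=True).repeat(3)
# inter = Interstitials(structure=bulk, num_neighbors=6)
# octahedral_like_positions = inter.positions
# site_volumes = inter.get_volumes()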
class Analyse:
"""Class to analyse atom structure."""
def __init__(self, structure):
"""
Args:
structure (:class:`pyiron.atomistics.structure.atoms.Atoms`): reference Atom structure.
"""
self._structure = structure
def get_interstitials(
MeshVertexIndices(self, topologyVertexIndex):
""" MeshVertexIndices(self: MeshTopologyVertexList, topologyVertexIndex: int) -> Array[int] """
pass
def SortEdges(self, topologyVertexIndex=None):
"""
SortEdges(self: MeshTopologyVertexList, topologyVertexIndex: int) -> bool
SortEdges(self: MeshTopologyVertexList) -> bool
"""
pass
def TopologyVertexIndex(self, vertexIndex):
""" TopologyVertexIndex(self: MeshTopologyVertexList, vertexIndex: int) -> int """
pass
def __contains__(self, *args): #cannot find CLR method
""" __contains__[Point3f](enumerable: IEnumerable[Point3f], value: Point3f) -> bool """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: MeshTopologyVertexList) -> int
"""
class MeshVertexColorList(object, IEnumerable[Color], IEnumerable, IRhinoTable[Color]):
# no doc
def Add(self, *__args):
"""
Add(self: MeshVertexColorList, color: Color) -> int
Add(self: MeshVertexColorList, red: int, green: int, blue: int) -> int
"""
pass
def AppendColors(self, colors):
""" AppendColors(self: MeshVertexColorList, colors: Array[Color]) -> bool """
pass
def Clear(self):
""" Clear(self: MeshVertexColorList) """
pass
def CreateMonotoneMesh(self, baseColor):
""" CreateMonotoneMesh(self: MeshVertexColorList, baseColor: Color) -> bool """
pass
def GetEnumerator(self):
""" GetEnumerator(self: MeshVertexColorList) -> IEnumerator[Color] """
pass
def SetColor(self, *__args):
"""
SetColor(self: MeshVertexColorList, face: MeshFace, color: Color) -> bool
SetColor(self: MeshVertexColorList, index: int, color: Color) -> bool
SetColor(self: MeshVertexColorList, index: int, red: int, green: int, blue: int) -> bool
"""
pass
def SetColors(self, colors):
""" SetColors(self: MeshVertexColorList, colors: Array[Color]) -> bool """
pass
def __add__(self, *args): #cannot find CLR method
""" x.__add__(y) <==> x+yx.__add__(y) <==> x+y """
pass
def __contains__(self, *args): #cannot find CLR method
""" __contains__[Color](enumerable: IEnumerable[Color], value: Color) -> bool """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: MeshVertexColorList) -> int
Set: Count(self: MeshVertexColorList) = value
"""
Tag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Tag(self: MeshVertexColorList) -> MappingTag
Set: Tag(self: MeshVertexColorList) = value
"""
class MeshVertexList(object, IEnumerable[Point3f], IEnumerable, IRhinoTable[Point3f]):
# no doc
def Add(self, *__args):
"""
Add(self: MeshVertexList, vertex: Point3f) -> int
Add(self: MeshVertexList, vertex: Point3d) -> int
Add(self: MeshVertexList, x: Single, y: Single, z: Single) -> int
Add(self: MeshVertexList, x: float, y: float, z: float) -> int
"""
pass
def AddVertices(self, vertices):
""" AddVertices(self: MeshVertexList, vertices: IEnumerable[Point3f])AddVertices(self: MeshVertexList, vertices: IEnumerable[Point3d]) """
pass
def Clear(self):
""" Clear(self: MeshVertexList) """
pass
def CombineIdentical(self, ignoreNormals, ignoreAdditional):
""" CombineIdentical(self: MeshVertexList, ignoreNormals: bool, ignoreAdditional: bool) -> bool """
pass
def CullUnused(self):
""" CullUnused(self: MeshVertexList) -> int """
pass
def GetConnectedVertices(self, vertexIndex):
""" GetConnectedVertices(self: MeshVertexList, vertexIndex: int) -> Array[int] """
pass
def GetEnumerator(self):
""" GetEnumerator(self: MeshVertexList) -> IEnumerator[Point3f] """
pass
def GetTopologicalIndenticalVertices(self, vertexIndex):
""" GetTopologicalIndenticalVertices(self: MeshVertexList, vertexIndex: int) -> Array[int] """
pass
def GetVertexFaces(self, vertexIndex):
""" GetVertexFaces(self: MeshVertexList, vertexIndex: int) -> Array[int] """
pass
def Hide(self, vertexIndex):
""" Hide(self: MeshVertexList, vertexIndex: int) """
pass
def HideAll(self):
""" HideAll(self: MeshVertexList) """
pass
def IsHidden(self, vertexIndex):
""" IsHidden(self: MeshVertexList, vertexIndex: int) -> bool """
pass
def Remove(self, *__args):
"""
Remove(self: MeshVertexList, indices: IEnumerable[int], shrinkFaces: bool) -> bool
Remove(self: MeshVertexList, index: int, shrinkFaces: bool) -> bool
"""
pass
def SetVertex(self, index, *__args):
"""
SetVertex(self: MeshVertexList, index: int, vertex: Point3f) -> bool
SetVertex(self: MeshVertexList, index: int, vertex: Point3d) -> bool
SetVertex(self: MeshVertexList, index: int, x: Single, y: Single, z: Single) -> bool
SetVertex(self: MeshVertexList, index: int, x: float, y: float, z: float) -> bool
"""
pass
def Show(self, vertexIndex):
""" Show(self: MeshVertexList, vertexIndex: int) """
pass
def ShowAll(self):
""" ShowAll(self: MeshVertexList) """
pass
def ToFloatArray(self):
""" ToFloatArray(self: MeshVertexList) -> Array[Single] """
pass
def ToPoint3dArray(self):
""" ToPoint3dArray(self: MeshVertexList) -> Array[Point3d] """
pass
def ToPoint3fArray(self):
""" ToPoint3fArray(self: MeshVertexList) -> Array[Point3f] """
pass
def __add__(self, *args): #cannot find CLR method
""" x.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+y """
pass
def __contains__(self, *args): #cannot find CLR method
""" __contains__[Point3f](enumerable: IEnumerable[Point3f], value: Point3f) -> bool """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: MeshVertexList) -> int
Set: Count(self: MeshVertexList) = value
"""
class MeshVertexNormalList(object, IEnumerable[Vector3f], IEnumerable, IRhinoTable[Vector3f]):
# no doc
def Add(self, *__args):
"""
Add(self: MeshVertexNormalList, normal: Vector3f) -> int
Add(self: MeshVertexNormalList, normal: Vector3d) -> int
Add(self: MeshVertexNormalList, x: Single, y: Single, z: Single) -> int
Add(self: MeshVertexNormalList, x: float, y: float, z: float) -> int
"""
pass
def AddRange(self, normals):
""" AddRange(self: MeshVertexNormalList, normals: Array[Vector3f]) -> bool """
pass
def Clear(self):
""" Clear(self: MeshVertexNormalList) """
pass
def ComputeNormals(self):
""" ComputeNormals(self: MeshVertexNormalList) -> bool """
pass
def Flip(self):
""" Flip(self: MeshVertexNormalList) """
pass
def GetEnumerator(self):
""" GetEnumerator(self: MeshVertexNormalList) -> IEnumerator[Vector3f] """
pass
def SetNormal(self, index, *__args):
"""
SetNormal(self: MeshVertexNormalList, index: int, normal: Vector3f) -> bool
SetNormal(self: MeshVertexNormalList, index: int, normal: Vector3d) -> bool
SetNormal(self: MeshVertexNormalList, index: int, x: Single, y: Single, z: Single) -> bool
SetNormal(self: MeshVertexNormalList, index: int, x: float, y: float, z: float) -> bool
"""
pass
def SetNormals(self, normals):
""" SetNormals(self: MeshVertexNormalList, normals: Array[Vector3f]) -> bool """
pass
def UnitizeNormals(self):
""" UnitizeNormals(self: MeshVertexNormalList) -> bool """
pass
def __add__(self, *args): #cannot find CLR method
""" x.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+yx.__add__(y) <==> x+y """
pass
def __contains__(self, *args): #cannot find CLR method
""" __contains__[Vector3f](enumerable: IEnumerable[Vector3f], value: Vector3f) -> bool """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, *args): #cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self, *args): #cannot find CLR method
""" __repr__(self: object) -> str """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: MeshVertexNormalList) -> int
Set: Count(self: MeshVertexNormalList) = value
"""
class NurbsCurveKnotList(object, IEnumerable[float], IEnumerable, IRhinoTable[float], IEpsilonComparable[NurbsCurveKnotList]):
# no doc
def ClampEnd(self, end):
""" ClampEnd(self: NurbsCurveKnotList, end: CurveEnd) -> bool """
pass
def CreatePeriodicKnots(self, knotSpacing):
""" CreatePeriodicKnots(self: NurbsCurveKnotList, knotSpacing: float) -> bool """
pass
def CreateUniformKnots(self, knotSpacing):
""" CreateUniformKnots(self: NurbsCurveKnotList, knotSpacing: float) -> bool """
pass
def EnsurePrivateCopy(self):
""" EnsurePrivateCopy(self: NurbsCurveKnotList) """
pass
def EpsilonEquals(self, other, epsilon):
""" EpsilonEquals(self: NurbsCurveKnotList, other: NurbsCurveKnotList, epsilon: float) -> bool """
pass
def InsertKnot(self, value, multiplicity=None):
"""
InsertKnot(self: NurbsCurveKnotList, value: float, multiplicity: int) -> bool
InsertKnot(self: NurbsCurveKnotList, value: float) -> bool
"""
pass
def KnotMultiplicity(self, index):
""" KnotMultiplicity(self: NurbsCurveKnotList, index: int) -> int """
pass
def SuperfluousKnot(self, start):
""" SuperfluousKnot(self: NurbsCurveKnotList, start: bool) -> float """
pass
def __contains__(self, *args): #cannot find CLR method
""" __contains__[float](enumerable: IEnumerable[float], value: float) -> bool """
pass
    def __getitem__(self, *args): #cannot find CLR method
layer_id = 'None'
# Get metric name, first get metric id from the features profile
# record
try:
metric_list = [row[2] for row in ionosphere_summary_list if row[0] == fp_id]
metric = metric_list[0]
except:
metric = 'UNKNOWN'
uri_to_matched_page = 'None'
matches.append([metric_human_date, match_id, matched_by, fp_id, layer_id, metric, uri_to_matched_page])
if get_layers_matched:
# layers matches
new_query_string = query_string.replace('ionosphere_matched', 'ionosphere_layers_matched')
query_string = new_query_string
new_query_string = query_string.replace('metric_timestamp', 'anomaly_timestamp')
query_string = new_query_string
try:
connection = engine.connect()
stmt = query_string
logger.info('executing %s' % stmt)
results = connection.execute(stmt)
connection.close()
except:
trace = traceback.format_exc()
logger.error(traceback.format_exc())
logger.error('error :: could not determine metrics from metrics table')
# @added 20170806 - Bug #2130: MySQL - Aborted_clients
# Added missing disposal and raise
if engine:
engine_disposal(engine)
return False, fail_msg, trace
for row in results:
anomaly_timestamp = int(row['anomaly_timestamp'])
metric_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(anomaly_timestamp)))
match_id = int(row['id'])
# @modified 20180921 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# matched_by = 'layers'
try:
approx_close = int(row['approx_close'])
except:
approx_close = 0
if approx_close == 0:
matched_by = 'layers'
else:
matched_by = 'layers - approx_close'
fp_id = int(row['fp_id'])
layer_id = int(row['layer_id'])
# Get metric name, first get metric id from the features profile
# record
try:
metric_list = [row[2] for row in ionosphere_summary_list if row[0] == fp_id]
metric = metric_list[0]
except:
metric = 'UNKNOWN'
uri_to_matched_page = 'None'
matches.append([metric_human_date, match_id, matched_by, fp_id, layer_id, metric, uri_to_matched_page])
sorted_matches = sorted(matches, key=lambda x: x[0])
matches = sorted_matches
if engine:
engine_disposal(engine)
try:
del metric_list
except:
logger.error('error :: failed to del metrics_list')
# @added 20180809 - Bug #2496: error reported on no matches found
# https://github.com/earthgecko/skyline/issues/64
# If there are no matches return this information in matches to prevent
# webapp from reporting an error
if not matches:
# [[1505560867, 39793, 'features_profile', 782, 'None', 'stats.skyline-dev-3-40g-gra1.vda.ioInProgress', 'ionosphere?fp_matched=true...'],
# @modified 20180921 - Feature #2558: Ionosphere - fluid approximation - approximately_close on layers
# matches = [['None', 'None', 'no matches were found', 'None', 'None', 'no matches were found', 'None']]
matches = [['None', 'None', 'no matches were found', 'None', 'None', 'no matches were found', 'None', 'None']]
return matches, fail_msg, trace
# @added 20170917 - Feature #1996: Ionosphere - matches page
def get_matched_id_resources(matched_id, matched_by, metric, requested_timestamp):
"""
Get the Ionosphere matched details of a features profile or layer
    :param matched_id: the matched id
    :type matched_id: int
    :param matched_by: either features_profile or layers
    :type matched_by: str
    :param metric: metric base_name
    :type metric: str
    :param requested_timestamp: the timestamp of the features profile
    :type requested_timestamp: int
    :return: tuple
    :rtype: (str, boolean, str, str, object, str)
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionoshere_backend.py :: get_matched_id_resources'
trace = 'none'
fail_msg = 'none'
matched_details = None
use_table = 'ionosphere_matched'
if matched_by == 'layers':
use_table = 'ionosphere_layers_matched'
logger.info('%s :: getting MySQL engine' % function_str)
try:
engine, fail_msg, trace = get_an_engine()
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get a MySQL engine'
logger.error('%s' % fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if not engine:
trace = 'none'
fail_msg = 'error :: engine not obtained'
logger.error(fail_msg)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if matched_by == 'features_profile':
ionosphere_matched_table = None
try:
ionosphere_matched_table, fail_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
if matched_by == 'layers':
ionosphere_layers_matched_table = None
try:
ionosphere_layers_matched_table, fail_msg, trace = ionosphere_layers_matched_table_meta(skyline_app, engine)
logger.info(fail_msg)
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
if trace != 'none':
fail_msg = 'error :: failed to get %s table for matched id %s' % (use_table, str(matched_id))
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
logger.info('%s :: %s table OK' % (function_str, use_table))
if matched_by == 'features_profile':
stmt = select([ionosphere_matched_table]).where(ionosphere_matched_table.c.id == int(matched_id))
if matched_by == 'layers':
stmt = select([ionosphere_layers_matched_table]).where(ionosphere_layers_matched_table.c.id == int(matched_id))
try:
connection = engine.connect()
# stmt = select([ionosphere_matched_table]).where(ionosphere_matched_table.c.id == int(matched_id))
result = connection.execute(stmt)
row = result.fetchone()
matched_details_object = row
connection.close()
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get matched_id %s details from %s DB table' % (str(matched_id), use_table)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if matched_by == 'features_profile':
try:
fp_id = row['fp_id']
metric_timestamp = row['metric_timestamp']
all_calc_features_sum = row['all_calc_features_sum']
all_calc_features_count = row['all_calc_features_count']
sum_common_values = row['sum_common_values']
common_features_count = row['common_features_count']
tsfresh_version = row['tsfresh_version']
matched_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(metric_timestamp)))
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling
minmax = int(row['minmax'])
minmax_fp_features_sum = row['minmax_fp_features_sum']
minmax_fp_features_count = row['minmax_fp_features_count']
minmax_anomalous_features_sum = row['minmax_anomalous_features_sum']
minmax_anomalous_features_count = row['minmax_anomalous_features_count']
matched_details = '''
tsfresh_version :: %s
all_calc_features_sum :: %s | all_calc_features_count :: %s
sum_common_values :: %s | common_features_count :: %s
metric_timestamp :: %s | human_date :: %s
minmax_scaled :: %s
minmax_fp_features_sum :: %s | minmax_fp_features_count :: %s
minmax_anomalous_features_sum :: %s | minmax_anomalous_features_count :: %s
''' % (str(tsfresh_version), str(all_calc_features_sum),
str(all_calc_features_count), str(sum_common_values),
str(common_features_count), str(metric_timestamp),
str(matched_human_date), str(minmax),
str(minmax_fp_features_sum), str(minmax_fp_features_count),
str(minmax_anomalous_features_sum),
str(minmax_anomalous_features_count))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get details for matched id %s' % str(matched_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
full_duration_stmt = 'SELECT full_duration FROM ionosphere WHERE id=%s' % str(fp_id)
full_duration = None
try:
connection = engine.connect()
result = connection.execute(full_duration_stmt)
connection.close()
for row in result:
if not full_duration:
full_duration = int(row[0])
logger.info('full_duration for matched determined as %s' % (str(full_duration)))
except:
trace = traceback.format_exc()
logger.error(trace)
logger.error('error :: could not determine full_duration from ionosphere table')
# Disposal and return False, fail_msg, trace for Bug #2130: MySQL - Aborted_clients
if engine:
engine_disposal(engine)
return False, fail_msg, trace
if matched_by == 'layers':
try:
layer_id = row['layer_id']
fp_id = row['fp_id']
metric_timestamp = row['anomaly_timestamp']
anomalous_datapoint = row['anomalous_datapoint']
full_duration = row['full_duration']
matched_human_date = time.strftime('%Y-%m-%d %H:%M:%S %Z (%A)', time.localtime(int(metric_timestamp)))
matched_details = '''
layer_id :: %s
anomalous_datapoint :: %s
full_duration :: %s
metric_timestamp :: %s | human_date :: %s
''' % (str(layer_id), str(anomalous_datapoint), str(full_duration),
str(metric_timestamp), str(matched_human_date))
except:
trace = traceback.format_exc()
logger.error(trace)
fail_msg = 'error :: could not get details for matched id %s' % str(matched_id)
logger.error('%s' % fail_msg)
if engine:
engine_disposal(engine)
# return False, False, fail_msg, trace, False
raise # to webapp to return in the UI
if engine:
engine_disposal(engine)
# Create a Graphite image
from_timestamp = str(int(metric_timestamp) - int(full_duration))
until_timestamp = str(metric_timestamp)
timeseries_dir = metric.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_PROFILES_FOLDER, timeseries_dir,
str(requested_timestamp))
if matched_by == 'features_profile':
graph_image_file = '%s/%s.matched.fp_id-%s.%s.png' % (
metric_data_dir, metric, str(fp_id), str(metric_timestamp))
if matched_by == 'layers':
graph_image_file = '%s/%s.layers_id-%s.matched.layers.fp_id-%s.%s.png' % (
metric_data_dir, metric, str(matched_id),
str(fp_id), str(layer_id))
if not path.isfile(graph_image_file):
logger.info('getting Graphite graph for match - from_timestamp - %s, until_timestamp - %s' % (str(from_timestamp), str(until_timestamp)))
graph_image = get_graphite_metric(
skyline_app, metric, from_timestamp, until_timestamp, 'image',
graph_image_file)
if not graph_image:
logger.error('failed getting Graphite graph')
graph_image_file = None
return matched_details, True, fail_msg, trace, matched_details_object, graph_image_file
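# Illustrative call from the webapp layer (argument values are assumptions; the
# metric name follows the example row format shown earlier in this module):
#
# matched_details, success, fail_msg, trace, matched_details_object, graph_image_file = \
#     get_matched_id_resources(
#         39793, 'features_profile',
#         'stats.skyline-dev-3-40g-gra1.vda.ioInProgress', 1505560867)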
# @added 20180812 - Feature #2430: Ionosphere validate learnt features profiles page
def get_features_profiles_to_validate(base_name):
"""
Get the details for Ionosphere features profiles that need to be validated
for a metric and returns a list of the details for each of the features
profile including the ionosphere_image API URIs for all the relevant graph
images for the weabpp Ionosphere validate_features_profiles page.
[[ fp_id, metric_id, metric, fp_full_duration, anomaly_timestamp,
fp_parent_id, parent_full_duration, parent_anomaly_timestamp, fp_date,
fp_graph_uri, parent_fp_date, parent_fp_graph_uri, parent_parent_fp_id,
fp_learn_graph_uri, parent_fp_learn_graph_uri, minimum_full_duration,
maximum_full_duration]]
:param base_name: metric base_name
:type base_name: str
:return: list of lists
:rtype: [[int, int, str, int, int, int, int, int, str, str, str, str, int, str, str, int, int]]
"""
logger = logging.getLogger(skyline_app_logger)
function_str = 'ionoshere_backend.py :: get_feature_profiles_validate'
trace = 'none'
fail_msg = 'none'
# Query the ionosphere_functions function for base_name, validated == false
# and get the details for each features profile that needs to be validated
features_profiles_to_validate = []
search_success = False
fps = []
try:
fps, fps_count, mc, cc, gc, full_duration_list, enabled_list, tsfresh_version_list, generation_list, search_success, fail_msg, trace = ionosphere_search(False, True)
logger.info('fp object :: %s' % str(fps))
except:
trace = traceback.format_exc()
        fail_msg
# src/pkgcore/ebuild/portage_conf.py
"""make.conf translator.
Converts portage config files into :obj:`pkgcore.config` form.
"""
__all__ = (
'PortageConfig', 'SecurityUpgradesViaProfile',
)
import configparser
import errno
import os
import sys
from collections import OrderedDict
from snakeoil.bash import read_bash_dict
from snakeoil.compatibility import IGNORED_EXCEPTIONS
from snakeoil.mappings import DictMixin, ImmutableDict
from snakeoil.osutils import access, listdir_files, pjoin
from .. import const
from .. import exceptions as base_errors
from ..config import basics
from ..config import errors as config_errors
from ..config.hint import configurable
from ..fs.livefs import sorted_scan
from ..log import logger
from ..pkgsets.glsa import SecurityUpgrades
from . import const as econst
from . import profiles, repo_objs
from .misc import optimize_incrementals
from .repository import errors as repo_errors
def my_convert_hybrid(manager, val, arg_type):
"""Modified convert_hybrid using a sequence of strings for section_refs."""
if arg_type.startswith('refs:'):
subtype = 'ref:' + arg_type.split(':', 1)[1]
return [basics.LazyNamedSectionRef(manager, subtype, name) for name in val]
return basics.convert_hybrid(manager, val, arg_type)
@configurable({'ebuild_repo': 'ref:repo', 'vdb': 'ref:repo',
'profile': 'ref:profile'}, typename='pkgset')
def SecurityUpgradesViaProfile(ebuild_repo, vdb, profile):
"""generate a GLSA vuln. pkgset limited by profile
Args:
ebuild_repo (:obj:`pkgcore.ebuild.repository.UnconfiguredTree`): target repo
vdb (:obj:`pkgcore.repository.prototype.tree`): livefs
profile (:obj:`pkgcore.ebuild.profiles`): target profile
Returns:
pkgset of relevant security upgrades
"""
arch = profile.arch
if arch is None:
raise config_errors.ComplexInstantiationError("arch wasn't set in profiles")
return SecurityUpgrades(ebuild_repo, vdb, arch)
class ParseConfig(configparser.ConfigParser):
"""Custom ConfigParser class to support returning dict objects."""
def parse_file(self, f, reset=True):
"""Parse config data from a given file handle.
By default the underlying config data is reset on each call if it
exists. This allows multiple files to be easily parsed by a single instance
without combining all the data in one instance.
Args:
f: iterable yielding unicode strings (opened file handle)
reset (boolean): reset config data if it exists before parsing
Returns:
dict: default settings
dict: regular section settings
"""
if self._defaults and reset:
self._defaults = self._dict()
if self._sections and reset:
self._sections = self._dict()
        # currently we don't reset section proxies as they shouldn't affect
        # this direct data dumping
self.read_file(f)
return self._defaults, self._sections
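    # Illustrative usage (the path is an assumption): parse one INI-style file
    # and receive its DEFAULT settings and per-section settings as plain dicts.
    #
    # parser = ParseConfig()
    # with open('/etc/portage/repos.conf') as f:
    #     defaults, sections = parser.parse_file(f)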
class PortageConfig(DictMixin):
"""Support for portage's config file layout."""
_supported_repo_types = {}
def __init__(self, location=None, profile_override=None, **kwargs):
"""
Args:
location (optional[str]): path to the portage config directory,
(defaults to /etc/portage)
profile_override (optional[str]): profile to use instead of the current system
profile, i.e. the target of the /etc/portage/make.profile symlink
root (optional[str]): target root filesystem (defaults to /)
buildpkg (optional[bool]): forcibly disable/enable building binpkgs, otherwise
FEATURES=buildpkg from make.conf is used
Returns:
dict: config settings
"""
self._config = {}
stubconfig = pjoin(const.DATA_PATH, 'stubconfig')
if location is None:
path = os.path.abspath(sys.prefix)
while (parent := os.path.dirname(path)) != path:
config_root = pjoin(parent, 'etc/portage')
if os.path.exists(config_root):
location = config_root
break
path = parent
else:
# fallback to stub config non-Gentoo systems
location = stubconfig
# override profile when using stub config
if location == stubconfig:
profile_override = pjoin(const.DATA_PATH, 'stubrepo/profiles/default')
self.dir = location
# this actually differs from portage parsing- we allow
# make.globals to provide vars used in make.conf, portage keeps
# them separate (kind of annoying)
#
# this isn't preserving incremental behaviour for features/use unfortunately
make_conf = {}
try:
self.load_make_conf(make_conf, pjoin(const.CONFIG_PATH, 'make.globals'))
except IGNORED_EXCEPTIONS:
raise
except Exception as e:
raise config_errors.ParsingError("failed to load make.globals") from e
self.load_make_conf(
make_conf, pjoin(self.dir, 'make.conf'), required=False,
allow_sourcing=True, incrementals=True)
self.root = kwargs.pop('root', make_conf.get("ROOT", "/"))
gentoo_mirrors = [
x.rstrip("/") + "/distfiles" for x in make_conf.pop("GENTOO_MIRRORS", "").split()]
self.features = frozenset(
optimize_incrementals(make_conf.get('FEATURES', '').split()))
self._add_sets()
self._add_profile(profile_override)
self['vdb'] = basics.AutoConfigSection({
'class': 'pkgcore.vdb.ondisk.tree',
'location': pjoin(self.root, 'var', 'db', 'pkg'),
'cache_location': '/var/cache/edb/dep/var/db/pkg',
})
try:
repos_conf_defaults, repos_conf = self.load_repos_conf(
pjoin(self.dir, 'repos.conf'))
except config_errors.ParsingError as e:
if not getattr(getattr(e, 'exc', None), 'errno', None) == errno.ENOENT:
raise
try:
# fallback to defaults provided by pkgcore
repos_conf_defaults, repos_conf = self.load_repos_conf(
pjoin(const.CONFIG_PATH, 'repos.conf'))
except IGNORED_EXCEPTIONS:
raise
except Exception as e:
raise config_errors.ParsingError('failed to find a usable repos.conf') from e
self['ebuild-repo-common'] = basics.AutoConfigSection({
'class': 'pkgcore.ebuild.repository.tree',
'default_mirrors': gentoo_mirrors,
'inherit-only': True,
})
repo_map = {}
repos = []
for repo_name, repo_opts in list(repos_conf.items()):
repo_cls = repo_opts.pop('repo-type')
try:
repo = repo_cls(
self, repo_name=repo_name, repo_opts=repo_opts,
repo_map=repo_map, defaults=repos_conf_defaults)
except repo_errors.UnsupportedRepo as e:
logger.warning(
f'skipping {repo_name!r} repo: unsupported EAPI {str(e.repo.eapi)!r}')
del repos_conf[repo_name]
continue
# only register existent repos
if os.path.exists(repo_opts['location']):
self[repo_name] = basics.AutoConfigSection(repo)
repos.append(repo_name)
# XXX: Hack for portage-2 profile format support. We need to figure out how
# to dynamically create this from the config at runtime on attr access.
profiles.ProfileNode._repo_map = ImmutableDict(repo_map)
self._make_repo_syncers(repos_conf, make_conf)
if repos:
self['repo-stack'] = basics.FakeIncrementalDictConfigSection(
my_convert_hybrid, {
'class': 'pkgcore.repository.multiplex.config_tree',
'repos': tuple(repos)})
self['vuln'] = basics.AutoConfigSection({
'class': SecurityUpgradesViaProfile,
'ebuild_repo': 'repo-stack',
'vdb': 'vdb',
'profile': 'profile',
})
# check if package building was forced on by the user
forced_buildpkg = kwargs.pop('buildpkg', False)
if forced_buildpkg:
make_conf['FEATURES'] += ' buildpkg'
# finally... domain.
make_conf.update({
'class': 'pkgcore.ebuild.domain.domain',
'repos': tuple(repos),
'default': True,
'vdb': ('vdb',),
'profile': 'profile',
'root': self.root,
'config_dir': self.dir,
})
self['livefs'] = basics.FakeIncrementalDictConfigSection(
my_convert_hybrid, make_conf)
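    # Illustrative sketch (path and flag are assumptions): build the pkgcore
    # config sections from a portage config directory, forcing binpkg builds on.
    #
    # pconf = PortageConfig(location='/etc/portage', buildpkg=True)
    # section_names = list(pconf.keys())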
def __setitem__(self, key, value):
self._config[key] = value
def __getitem__(self, key):
return self._config[key]
def __delitem__(self, key):
del self._config[key]
def keys(self):
return iter(self._config.keys())
@staticmethod
def load_make_conf(vars_dict, path, allow_sourcing=False, required=True,
allow_recurse=True, incrementals=False):
"""parse make.conf files
Args:
vars_dict (dict): dictionary to add parsed variables to
path (str): path to the make.conf which can be a regular file or
directory, if a directory is passed all the non-hidden files within
that directory are parsed in alphabetical order.
"""
sourcing_command = 'source' if allow_sourcing else None
if allow_recurse:
files = sorted_scan(
os.path.realpath(path), follow_symlinks=True, nonexistent=True,
hidden=False, backup=False)
else:
files = (path,)
for fp in files:
try:
new_vars = read_bash_dict(
fp, vars_dict=vars_dict, sourcing_command=sourcing_command)
except PermissionError as e:
raise base_errors.PermissionDenied(fp, write=False) from e
except EnvironmentError as e:
if e.errno != errno.ENOENT or required:
raise config_errors.ParsingError(f"parsing {fp!r}", exception=e) from e
return
if incrementals:
for key in econst.incrementals:
if key in vars_dict and key in new_vars:
new_vars[key] = f"{vars_dict[key]} {new_vars[key]}"
# quirk of read_bash_dict; it returns only what was mutated.
vars_dict.update(new_vars)
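    # Illustrative sketch mirroring how __init__ uses this helper (paths are
    # assumptions): make.globals is parsed first, then the user's make.conf is
    # layered on top with sourcing and incremental variable handling enabled.
    #
    # make_conf = {}
    # PortageConfig.load_make_conf(make_conf, '/usr/share/pkgcore/config/make.globals')
    # PortageConfig.load_make_conf(make_conf, '/etc/portage/make.conf', required=False,
    #                              allow_sourcing=True, incrementals=True)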
@classmethod
def load_repos_conf(cls, path):
"""parse repos.conf files
Args:
path (str): path to the repos.conf which can be a regular file or
directory, if a directory is passed all the non-hidden files within
that directory are parsed in alphabetical order.
Returns:
dict: global repo settings
dict: repo settings
"""
main_defaults = {}
repos = {}
parser = ParseConfig()
for fp in sorted_scan(
os.path.realpath(path), follow_symlinks=True, nonexistent=True,
hidden=False, backup=False):
try:
with open(fp) as f:
defaults, repo_confs = parser.parse_file(f)
except PermissionError as e:
raise base_errors.PermissionDenied(fp, write=False) from e
except EnvironmentError as e:
raise config_errors.ParsingError(f"parsing {fp!r}", exception=e) from e
except configparser.Error as e:
raise config_errors.ParsingError(f"repos.conf: {fp!r}", exception=e) from e
if defaults and main_defaults:
logger.warning(f"repos.conf: parsing {fp!r}: overriding DEFAULT section")
main_defaults.update(defaults)
for name, repo_conf in repo_confs.items():
if name in repos:
logger.warning(f"repos.conf: parsing {fp!r}: overriding {name!r} repo")
# ignore repo if location is unset
location = repo_conf.get('location', None)
if location is None:
logger.warning(
f"repos.conf: parsing {fp!r}: "
f"{name!r} repo missing location setting, ignoring repo")
continue
location = os.path.expanduser(location)
if os.path.isabs(location):
repo_conf['location'] = location
else:
# support relative paths based on where repos.conf is located
repo_conf['location'] = os.path.abspath(
pjoin(os.path.dirname(path), location))
# repo type defaults to ebuild for compat with portage
repo_type = repo_conf.get('repo-type', 'ebuild-v1')
try:
repo_conf['repo-type'] = cls._supported_repo_types[repo_type]
except KeyError:
logger.warning(
f"repos.conf: parsing {fp!r}: "
f"{name!r} repo has unsupported repo-type {repo_type!r}, "
"ignoring repo")
continue
# Priority defaults to zero if unset or invalid for ebuild repos
# while binpkg repos have the lowest priority by default.
priority = repo_conf.get('priority', None)
if priority is None:
if repo_type.startswith('binpkg'):
priority = -10000
else:
priority = 0
try:
priority = int(priority)
except ValueError:
logger.warning(
f"repos.conf: parsing {fp!r}: {name!r} repo has invalid priority "
f"setting: {priority!r} (defaulting to 0)")
priority = 0
finally:
repo_conf['priority'] = priority
# register repo
repos[name] = repo_conf
if repos:
# the default repo is gentoo if unset and gentoo exists
default_repo = main_defaults.get('main-repo', 'gentoo')
if default_repo not in repos:
raise config_errors.UserConfigError(
f"repos.conf: default repo {default_repo!r} is undefined or invalid")
if 'main-repo' not in main_defaults:
main_defaults['main-repo'] = default_repo
# the default repo has a low priority if unset or zero
if repos[default_repo]['priority'] == 0:
repos[default_repo]['priority'] = -1000
# sort repos via priority, in this case high values map to high priorities
repos = OrderedDict(
(k, v) for k, v in
sorted(repos.items(), key=lambda d: d[1]['priority'], reverse=True))
return main_defaults, repos
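    # Illustrative repos.conf fragment (locations are assumptions) showing the
    # priority rules above: 'gentoo' is the main repo, so its unset priority of 0
    # is lowered to -1000 and it sorts after 'local' (priority 10).
    #
    # [DEFAULT]
    # main-repo = gentoo
    #
    # [gentoo]
    # location = /var/db/repos/gentoo
    #
    # [local]
    # location = /var/db/repos/local
    # priority = 10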
def _make_repo_syncers(self, repos_conf, make_conf, allow_timestamps=True):
"""generate syncing configs for known repos"""
rsync_opts = None
        usersync = 'usersync' in self.features
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['MetricAlert']
class MetricAlert(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertActionArgs']]]]] = None,
application_insights_web_test_location_availability_criteria: Optional[pulumi.Input[pulumi.InputType['MetricAlertApplicationInsightsWebTestLocationAvailabilityCriteriaArgs']]] = None,
auto_mitigate: Optional[pulumi.Input[bool]] = None,
criterias: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertCriteriaArgs']]]]] = None,
description: Optional[pulumi.Input[str]] = None,
dynamic_criteria: Optional[pulumi.Input[pulumi.InputType['MetricAlertDynamicCriteriaArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
frequency: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_location: Optional[pulumi.Input[str]] = None,
target_resource_type: Optional[pulumi.Input[str]] = None,
window_size: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Metric Alert within Azure Monitor.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
main_resource_group = azure.core.ResourceGroup("mainResourceGroup", location="West US")
to_monitor = azure.storage.Account("toMonitor",
resource_group_name=main_resource_group.name,
location=main_resource_group.location,
account_tier="Standard",
account_replication_type="LRS")
main_action_group = azure.monitoring.ActionGroup("mainActionGroup",
resource_group_name=main_resource_group.name,
short_name="exampleact",
webhook_receivers=[azure.monitoring.ActionGroupWebhookReceiverArgs(
name="callmyapi",
service_uri="http://example.com/alert",
)])
example = azure.monitoring.MetricAlert("example",
resource_group_name=main_resource_group.name,
scopes=[to_monitor.id],
description="Action will be triggered when Transactions count is greater than 50.",
criterias=[azure.monitoring.MetricAlertCriteriaArgs(
metric_namespace="Microsoft.Storage/storageAccounts",
metric_name="Transactions",
aggregation="Total",
operator="GreaterThan",
threshold=50,
dimensions=[azure.monitoring.MetricAlertCriteriaDimensionArgs(
name="ApiName",
operator="Include",
values=["*"],
)],
)],
actions=[azure.monitoring.MetricAlertActionArgs(
action_group_id=main_action_group.id,
)])
```
## Import
Metric Alerts can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:monitoring/metricAlert:MetricAlert main /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-resources/providers/microsoft.insights/metricalerts/example-metricalert
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertActionArgs']]]] actions: One or more `action` blocks as defined below.
:param pulumi.Input[pulumi.InputType['MetricAlertApplicationInsightsWebTestLocationAvailabilityCriteriaArgs']] application_insights_web_test_location_availability_criteria: A `application_insights_web_test_location_availability_criteria` block as defined below.
:param pulumi.Input[bool] auto_mitigate: Should the alerts in this Metric Alert be auto resolved? Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertCriteriaArgs']]]] criterias: One or more (static) `criteria` blocks as defined below.
:param pulumi.Input[str] description: The description of this Metric Alert.
:param pulumi.Input[pulumi.InputType['MetricAlertDynamicCriteriaArgs']] dynamic_criteria: A `dynamic_criteria` block as defined below.
:param pulumi.Input[bool] enabled: Should this Metric Alert be enabled? Defaults to `true`.
:param pulumi.Input[str] frequency: The evaluation frequency of this Metric Alert, represented in ISO 8601 duration format. Possible values are `PT1M`, `PT5M`, `PT15M`, `PT30M` and `PT1H`. Defaults to `PT1M`.
:param pulumi.Input[str] name: The name of the Metric Alert. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Metric Alert instance.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: A set of strings of resource IDs at which the metric criteria should be applied.
:param pulumi.Input[int] severity: The severity of this Metric Alert. Possible values are `0`, `1`, `2`, `3` and `4`. Defaults to `3`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] target_resource_location: The location of the target resource.
:param pulumi.Input[str] target_resource_type: The resource type (e.g. `Microsoft.Compute/virtualMachines`) of the target resource.
:param pulumi.Input[str] window_size: The period of time that is used to monitor alert activity, represented in ISO 8601 duration format. This value must be greater than `frequency`. Possible values are `PT1M`, `PT5M`, `PT15M`, `PT30M`, `PT1H`, `PT6H`, `PT12H` and `P1D`. Defaults to `PT5M`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['actions'] = actions
__props__['application_insights_web_test_location_availability_criteria'] = application_insights_web_test_location_availability_criteria
__props__['auto_mitigate'] = auto_mitigate
__props__['criterias'] = criterias
__props__['description'] = description
__props__['dynamic_criteria'] = dynamic_criteria
__props__['enabled'] = enabled
__props__['frequency'] = frequency
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if scopes is None:
raise TypeError("Missing required property 'scopes'")
__props__['scopes'] = scopes
__props__['severity'] = severity
__props__['tags'] = tags
__props__['target_resource_location'] = target_resource_location
__props__['target_resource_type'] = target_resource_type
__props__['window_size'] = window_size
super(MetricAlert, __self__).__init__(
'azure:monitoring/metricAlert:MetricAlert',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertActionArgs']]]]] = None,
application_insights_web_test_location_availability_criteria: Optional[pulumi.Input[pulumi.InputType['MetricAlertApplicationInsightsWebTestLocationAvailabilityCriteriaArgs']]] = None,
auto_mitigate: Optional[pulumi.Input[bool]] = None,
criterias: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertCriteriaArgs']]]]] = None,
description: Optional[pulumi.Input[str]] = None,
dynamic_criteria: Optional[pulumi.Input[pulumi.InputType['MetricAlertDynamicCriteriaArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
frequency: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
severity: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_location: Optional[pulumi.Input[str]] = None,
target_resource_type: Optional[pulumi.Input[str]] = None,
window_size: Optional[pulumi.Input[str]] = None) -> 'MetricAlert':
"""
Get an existing MetricAlert resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertActionArgs']]]] actions: One or more `action` blocks as defined below.
:param pulumi.Input[pulumi.InputType['MetricAlertApplicationInsightsWebTestLocationAvailabilityCriteriaArgs']] application_insights_web_test_location_availability_criteria: A `application_insights_web_test_location_availability_criteria` block as defined below.
:param pulumi.Input[bool] auto_mitigate: Should the alerts in this Metric Alert be auto resolved? Defaults to `true`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MetricAlertCriteriaArgs']]]] criterias: One or more (static) `criteria` blocks as defined below.
:param pulumi.Input[str] description: The description of this Metric Alert.
:param pulumi.Input[pulumi.InputType['MetricAlertDynamicCriteriaArgs']] dynamic_criteria: A `dynamic_criteria` block as defined below.
:param pulumi.Input[bool] enabled: Should this Metric Alert be enabled? Defaults to `true`.
:param pulumi.Input[str] frequency: The evaluation frequency of this Metric Alert, represented in ISO 8601 duration format. Possible values are `PT1M`, `PT5M`, `PT15M`, `PT30M` and `PT1H`. Defaults to `PT1M`.
:param pulumi.Input[str] name: The name of the Metric Alert. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the Metric Alert instance.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scopes: A set of strings of resource IDs at which the metric criteria should be applied.
:param pulumi.Input[int] severity: The severity of this Metric Alert. Possible values are `0`, `1`, `2`, `3` and `4`. Defaults to `3`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] target_resource_location: The location of the target resource.
:param pulumi.Input[str] target_resource_type: The resource type (e.g. `Microsoft.Compute/virtualMachines`) of the target resource.
:param pulumi.Input[str] window_size: The period of time that is used to monitor alert activity, represented in ISO 8601 duration format. This value must be greater than `frequency`. Possible values are `PT1M`, `PT5M`, `PT15M`, `PT30M`, `PT1H`, `PT6H`, `PT12H` and `P1D`. Defaults to `PT5M`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["actions"] = actions
__props__["application_insights_web_test_location_availability_criteria"] = application_insights_web_test_location_availability_criteria
__props__["auto_mitigate"] = auto_mitigate
__props__["criterias"] = criterias
__props__["description"] = description
__props__["dynamic_criteria"] = dynamic_criteria
__props__["enabled"] = enabled
__props__["frequency"] = frequency
__props__["name"] = name
__props__["resource_group_name"] = resource_group_name
__props__["scopes"] = scopes
__props__["severity"] = severity
__props__["tags"] = tags
__props__["target_resource_location"] = target_resource_location
__props__["target_resource_type"] = target_resource_type
__props__["window_size"] = window_size
return MetricAlert(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def actions(self) -> pulumi.Output[Optional[Sequence['outputs.MetricAlertAction']]]:
"""
One or more `action` blocks as defined below.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter(name="applicationInsightsWebTestLocationAvailabilityCriteria")
def application_insights_web_test_location_availability_criteria(self) -> pulumi.Output[Optional['outputs.MetricAlertApplicationInsightsWebTestLocationAvailabilityCriteria']]:
"""
An `application_insights_web_test_location_availability_criteria` block as defined below.
"""
return pulumi.get(self, "application_insights_web_test_location_availability_criteria")
@property
@pulumi.getter(name="autoMitigate")
def auto_mitigate(self) -> pulumi.Output[Optional[bool]]:
"""
Should the alerts in this Metric Alert be auto resolved? Defaults to `true`.
"""
return pulumi.get(self, "auto_mitigate")
@property
@pulumi.getter
def criterias(self) -> pulumi.Output[Optional[Sequence['outputs.MetricAlertCriteria']]]:
"""
One or more (static) `criteria` blocks as defined below.
"""
return pulumi.get(self, "criterias")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of this Metric Alert.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="dynamicCriteria")
def dynamic_criteria(self) -> pulumi.Output[Optional['outputs.MetricAlertDynamicCriteria']]:
"""
A `dynamic_criteria` block as defined below.
"""
return pulumi.get(self, "dynamic_criteria")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Should this Metric Alert be enabled? Defaults to `true`.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def frequency(self) -> pulumi.Output[Optional[str]]:
"""
The evaluation frequency of this Metric Alert, represented in ISO 8601 duration format. Possible values are `PT1M`, `PT5M`, `PT15M`, `PT30M` and `PT1H`. Defaults to `PT1M`.
"""
return pulumi.get(self, "frequency")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Metric Alert. Changing this forces a new resource to be created.
must be a list-like with numeric or'
'date-like values: %s', list_check)
def _get_f_dummy_from_calendar(calendar):
# Generate dummy model function from a pandas HolidayCalendar
def f_dummy_calendar(a_x, a_date, **kwargs):
# TODO: If we can pass dict_cal as an argument,
# use pre-loaded list of dates for performance
# TODO: If we can guarantee sorted dates,
# change this to a_date[0], a_date[-1] for performance
list_check_date = calendar.holidays(a_date.min(), a_date.max())
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_calendar
def _get_f_dummy_from_holiday(holiday):
def f_dummy_holiday(a_x, a_date, **kwargs):
# TODO: If we can pass dict_cal as an argument,
# use pre-loaded list of dates for performance
# if dict_cal in kwargs.keys():
# list_check_date = dict_cal.get(holiday.name)
# else:
# TODO: If we can guarantee sorted dates,
# change this to a_date[0], a_date[-1] for performance
list_check_date = holiday.dates(a_date.min(), a_date.max())
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_holiday
def _get_f_model_dummy(f_dummy, mask_name):
"""
Generate a model function for a dummy variable defined by f_dummy
:param f_dummy: function of the form f_dummy(a_x, a_date) that returns a 0/1 mask array
:type f_dummy: function
:param mask_name: key used to look up a pre-computed mask passed via kwargs
:type mask_name: basestring
:return: model function based on dummy variable, to use on a ForecastModel
:rtype: function
"""
def f_model_check(a_x, a_date, params, is_mult=False, **kwargs):
# Uses internal f_check to assign 0 or 1 to each sample
# If f_dummy(x)==1, return A (or 1 + A if is_mult)
# If f_dummy(x)==0, return 0 (or 1 if is_mult)
a_mask = kwargs.get(mask_name)
if a_mask is None:
a_mask = f_dummy(a_x, a_date)
[A] = params
if not is_mult:
a_result = A * a_mask
else:
a_result = (A) * a_mask + 1
return a_result
return f_model_check
def get_model_dummy(name, dummy, **kwargs):
"""
Generate a model based on a dummy variable.
:param name: Name of the model
:type name: basestring
:param dummy:
| Can be a function or a list-like.
| If a function, it must be of the form f_dummy(a_x, a_date),
| and return a numpy array of floats
| with the same length as a_x and values that are either 0 or 1.
| If a list-like of numerics, it will be converted to a f_dummy function
| as described above, which will have values of 1 when a_x has one of
| the values in the list, and 0 otherwise. If a list-like of date-likes,
| it will be converted to a f_dummy function as described above, which
| will have values of 1 when a_date has one of the values in the list,
| and 0 otherwise.
:type dummy: function, or list-like of numerics or datetime-likes
:param kwargs:
:type kwargs:
:return:
| A model that returns A when dummy is 1, and 0 (or 1 if is_mult==True)
| otherwise.
:rtype: ForecastModel
"""
mask_name = 'mask_' + name
f_dummy = _get_f_dummy(dummy)
_validate_f_dummy(f_dummy)
f_model_dummy = _get_f_model_dummy(f_dummy, mask_name)
dict_f_cache = {mask_name: f_dummy}
return ForecastModel(
name, 1, f_model_dummy, dict_f_cache=dict_f_cache, **kwargs)
model_season_wday_2 = get_model_dummy(
'season_wday_2', lambda a_x, a_date, **kwargs:
(a_date.weekday < 5).astype(float))
# Example dummy model - checks if it is Christmas
model_dummy_christmas = get_model_dummy(
'dummy_christmas', lambda a_x, a_date, **kwargs:
((a_date.month == 12) & (a_date.day == 25)).astype(float))
# Example dummy model - checks if it is first day of month
model_dummy_month_start = get_model_dummy(
'dummy_month_start', lambda a_x, a_date, **kwargs:
(a_date.day == 1).astype(float))
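# Illustrative addition (not part of the original module): a weekend dummy built
# with the same helper, mirroring the weekday model above.
model_dummy_weekend_example = get_model_dummy(
    'dummy_weekend_example', lambda a_x, a_date, **kwargs:
    (a_date.weekday >= 5).astype(float))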
class CalendarBankHolUK(AbstractHolidayCalendar):
rules = [
GoodFriday,
EasterMonday,
# Early May Bank Holiday - first Monday in May
Holiday('Early May Bank Holiday', month=5, day=1,
offset=DateOffset(weekday=MO(1))
),
# Spring Bank Holiday - Last Monday in May
Holiday('Spring Bank Holiday', month=5, day=31,
offset=DateOffset(weekday=MO(-1))
),
# August Bank holiday - Last Monday in August
Holiday('August Bank Holiday', month=8, day=30,
offset=DateOffset(weekday=MO(-1))
)
]
class CalendarChristmasUK(AbstractHolidayCalendar):
rules = [
Holiday('New Year\'s Day', month=1, day=1, observance=next_monday),
Holiday('Christmas', month=12, day=25, observance=next_monday),
Holiday('Boxing Day', month=12, day=26,
observance=next_monday_or_tuesday),
]
# Bank Holidays for Italy
class CalendarBankHolIta(AbstractHolidayCalendar):
rules = [
EasterMonday,
Holiday('Festa della Liberazione', month=4, day=25),
Holiday('Festa del lavoro', month=5, day=1),
Holiday('Festa della Repubblica', month=6, day=2),
Holiday('Ferragosto', month=8, day=15),
Holiday('Tutti i Santi', month=11, day=1),
Holiday('Immacolata Concezione', month=12, day=8),
]
class CalendarChristmasIta(AbstractHolidayCalendar):
rules = [
Holiday('New Year\'s Day', month=1, day=1, observance=next_monday),
Holiday('Christmas', month=12, day=25, observance=next_monday),
Holiday('Santo Stefano', month=12, day=26,
observance=next_monday_or_tuesday),
Holiday('Epiphany', month=1, day=6, observance=next_monday),
]
def get_model_from_calendars(l_calendar, name=None):
"""
Create a ForecastModel based on a list of pandas Calendars.
:param l_calendar: a calendar, or a list of calendars
:type l_calendar: pandas.tseries.holiday.AbstractHolidayCalendar or list
:param name: name of the resulting model; defaults to the first calendar's name
:type name: basestring
:return: model based on the input calendar
:rtype: ForecastModel
In pandas, Holidays and calendars provide a simple way to define
holiday rules, to be used in any analysis that requires a predefined
set of holidays. This function converts a Calendar object into a
ForecastModel that assigns a parameter to each calendar rule.
As an example, a Calendar with 1 rule defining Christmas dates
generates a model with a single parameter, which
determines the amount added/multiplied to samples falling on Christmas.
A calendar with 2 rules for Christmas and New Year will have two parameters
- the first one applying to samples in Christmas, and the second
one applying to samples in New Year.
Usage::
from pandas.tseries.holiday import USFederalHolidayCalendar
model_calendar = get_model_from_calendars(USFederalHolidayCalendar())
"""
if isinstance(l_calendar, AbstractHolidayCalendar):
l_calendar = [l_calendar]
# Filter out calendars without rules
l_calendar = [calendar for calendar in l_calendar if calendar.rules]
assert len(l_calendar), 'Need 1+ valid calendars'
if name is None:
name = l_calendar[0].name
l_model_dummy = [get_model_dummy(calendar.name, calendar)
for calendar in l_calendar]
f_model_prod = np.prod(l_model_dummy)
f_model_sum = np.sum(l_model_dummy)
def _f_init_params_calendar(
a_x=None, a_y=None, a_date=None, is_mult=False):
if is_mult:
return np.ones(len(l_model_dummy))
else:
return np.zeros(len(l_model_dummy))
def _f_model_calendar(a_x, a_date, params, is_mult=False, **kwargs):
f_all_dummies = f_model_prod if is_mult else f_model_sum
return f_all_dummies(a_x, a_date, params, is_mult, **kwargs)
model_calendar = ForecastModel(
name,
len(l_model_dummy),
_f_model_calendar,
_f_init_params_calendar,
l_cache_vars=f_model_sum.l_cache_vars,
dict_f_cache=f_model_sum.dict_f_cache
)
return model_calendar
model_calendar_uk = get_model_from_calendars(
[CalendarChristmasUK(), CalendarBankHolUK()], 'calendar_uk')
model_calendar_us = get_model_from_calendars(USFederalHolidayCalendar(),
'calendar_us')
# Calendar for Italy
model_calendar_ita = get_model_from_calendars(
[CalendarChristmasIta(), CalendarBankHolIta()], 'calendar_ita')
def get_model_from_datelist(name=None, *args):
"""
Create a ForecastModel based on one or more lists of dates.
:param name: Model name
:type name: str
:param args: Each element in args is a list of dates.
:type args:
:return: model based on the input lists of dates
:rtype: ForecastModel
Usage::
model_datelist1 = get_model_from_datelist('datelist1',
[date1, date2, date3])
model_datelists23 = get_model_from_datelist('datelists23',
[date1, date2], [date3, date4])
In the example above, model_datelist1 will have one parameter, which
determines the amount added/multiplied to samples with dates matching
either date1, date2 or date3. model_datelists23 will have two parameters
- the first one applying to samples in date1 and date2, and the second
one applying to samples in date3 and date4.
"""
l_model_dummy = [get_model_dummy('model_dummy', pd.to_datetime(l_date))
for l_date in args]
assert (len(l_model_dummy)), 'Need 1+ lists of dates'
f_model_prod = np.prod(l_model_dummy)
f_model_sum = np.sum(l_model_dummy)
def _f_init_params_date_list(
a_x=None, a_y=None, a_date=None, is_mult=False):
if is_mult:
return np.ones(len(l_model_dummy))
else:
return np.zeros(len(l_model_dummy))
def _f_model_date_list(a_x, a_date, params, is_mult=False, **kwargs):
f_all_dummies = f_model_prod if is_mult else f_model_sum
return f_all_dummies(a_x, a_date, params, is_mult, **kwargs)
model_date_list = ForecastModel(
name,
len(l_model_dummy),
_f_model_date_list,
_f_init_params_date_list
)
return model_date_list
# Utility functions
def fix_params_fmodel(forecast_model, l_params_fixed):
"""
Given a forecast model and a list of floats, modify the model so that some
of its parameters become fixed
:param forecast_model: Input model
:type forecast_model: ForecastModel
:param l_params_fixed: List of floats with same length as number of
parameters in model. For each element, a non-null value means
that the parameter in that position is fixed to that value.
A null value means that the parameter in that position is not fixed.
:type l_params_fixed: list
:return: A forecast model with a number of parameters equal to the number
of null values in l_params_fixed, with f_model modified so that some
of its parameters gain fixed values equal to the non-null values
in l_params
:rtype: ForecastModel
"""
assert len(l_params_fixed) == forecast_model.n_params
l_params_fixed = np.array(l_params_fixed)
a_null = np.isnan(l_params_fixed)
i_null = np.nonzero(a_null)
name = '{}_fixed_{}'.format(
forecast_model.name,
str(l_params_fixed).replace(
'nan',
':'))
n_params = len(i_null[0])
def f_model_fixed(a_x, a_date, params, is_mult=False, **kwargs):
# copy so the fixed-parameter template is not mutated between calls
params_long = np.copy(l_params_fixed)
params_long[i_null] = params
return forecast_model.f_model(a_x, a_date, params_long, is_mult)
def f_init_params_fixed(a_x=None, a_y=None, a_date=None, is_mult=False):
# return params short
params_init = forecast_model.f_init_params(a_x, a_y, a_date, is_mult)
params_init_short = np.array(params_init)[i_null]
return params_init_short
def f_bounds_fixed(a_x=None, a_y=None, a_date=None):
# return f_bounds short
bounds_min, bounds_max = forecast_model.f_bounds(a_x, a_y, a_date)
bounds_min_short = np.array(bounds_min)[i_null]
bounds_max_short = np.array(bounds_max)[i_null]
return bounds_min_short, bounds_max_short
model_result = ForecastModel(
name,
n_params,
f_model_fixed,
f_init_params_fixed,
f_bounds_fixed)
return model_result
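# Usage sketch (illustrative; assumes a 2-parameter model, e.g. a linear trend called model_linear):
#   model_fixed = fix_params_fmodel(model_linear, [np.nan, 0.0])
# pins the second parameter to 0.0 and leaves the first one free, so the resulting
# model exposes a single parameter to the fitting routine.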
def simplify_model(f_model, a_x=None, a_y=None, a_date=None):
"""
Check | |
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
from ROOT import *
from copy import deepcopy
from array import array
import six
gROOT.SetBatch() # don't pop up canvases
#Find Data files
def getFileInPath(rfile):
import os
for dir in os.environ['CMSSW_SEARCH_PATH'].split(":"):
if os.path.exists(os.path.join(dir,rfile)): return os.path.join(dir,rfile)
return None
# Default values
inputFileName = "DQM_V0013_R000292154__StreamExpressCosmics__Commissioning2017-Express-v1__DQMIO.root"
limitsFileName = "limits.dat"
outputDirectoryName = "OUT/"
minMaxFileName = "minmax.out"
#detIDsFileName = "DATA/detids.dat"
detIDsFileName = getFileInPath('DQM/SiStripMonitorClient/data/detids.dat')
#default one
baseRootDirs = ["DQMData/Run 292154/PixelPhase1/Run summary/Phase1_MechanicalView"
,"DQMData/Run 292154/PixelPhase1/Run summary/Tracks"
]
maxPxBarrel = 4
maxPxForward = 3
barrelLadderShift = [0, 14, 44, 90]
forwardDiskXShift = [25, 75, 125]
forwardDiskYShift = 45 # to make +DISK on top in the 'strip-like' layout
plotWidth, plotHeight = 3000, 2000
extremeBinsNum = 20
limits = ["num_digis 0.01 90 1 0",
"num_clusters 0.01 25 1 0",
"Trechitsize_y 0.01 10 0 0",
"Trechitsize_x 0.01 10 0 0",
"Tresidual_y 0.0000001 0.004 0 1",
"Tresidual_x 0.0000001 0.004 0 1",
"Tcharge 2000 80000 0 0",
"Thitefficiency 0.95 1 0 0",
#"Tmissing 0.01 500 0 0",
"Tnum_clusters_ontrack 0.01 15 1 0",
"Tsize 0.01 15 0 0",
#"Tvalid 0.01 90 0 0",
"adc 0.01 256 0 0",
"charge 2000 80000 0 0",
"size 0.01 15 0 0",]
class TH2PolyOfflineMaps:
###
# LOTS OF CODE BORROWED FROM: PYTHONBINREADER, PIXELTRACKERMAP
###
############################################################################
def __TraverseDirTree(self, dir):
try:
currPath = (dir.GetPath().split(":/"))[1]
except:
print("Exception raised: Path not found in the input file")
return
for obj in dir.GetListOfKeys():
if not obj.IsFolder():
if obj.ReadObjectAny(TClass.GetClass("TH2")):
th2 = deepcopy(obj.ReadObj())
name = th2.GetName()
if 6 < th2.GetNbinsX() < 10 and name.find("per") != -1 and name.find("Lumisection") == -1: #take only module lvl plots
print(''.join([dir.GetPath(), '/', name]))
# fix when there are plots starting with the same strings in different directories
prefix = ""
for i in self.dirs:
if currPath.startswith(i):
prefix = self.dirsAliases[i]
break
# print(currPath, prefix)
th2.SetName(prefix + th2.GetName())
self.listOfNumHistograms.append(th2)
else:
self.__TraverseDirTree(obj.ReadObj())
def __GetPartStr(self, isXlowerThanZero, isYlowerThanZero):
if isXlowerThanZero and isYlowerThanZero:
return "mO"
if isXlowerThanZero and isYlowerThanZero == False:
return "mI"
if isXlowerThanZero == False and isYlowerThanZero:
return "pO"
if isXlowerThanZero == False and isYlowerThanZero == False:
return "pI"
def __GetBarrelSector(self, layer, signedLadder, signedModule): #adapted from PixelBarrelName
theLadder = abs(signedLadder)
theModule = abs(signedModule)
sector = 0
if layer == 1:
if theLadder == 1:
if theModule >= 2:
return 1
else:
return 2
if theLadder == 2:
if theModule >= 3:
return 2
else:
return 3
if theLadder == 3:
if theModule >= 4:
return 3
else:
return 4
if theLadder == 4:
if theModule >= 2:
return 5
else:
return 6
if theLadder == 5:
if theModule >= 3:
return 6
else:
return 7
if theLadder == 6:
if theModule >= 4:
return 7
else:
return 8
# a simplified form of the sector assignment is used here; see the original source file for reference
elif layer == 2:
i = theLadder // 5
sector = i * 3
shortLadder = theLadder - 5 * i
for i in range(0, shortLadder, 2):
sector = sector + 1
return sector
elif layer == 3:
sector = 1
for i in range(2, theLadder, 3):
if (i + 1) % 3 == 0:
sector = sector + 1
return sector
elif layer == 4:
sector = (theLadder + 3) // 4
return sector
def __BuildOnlineBarrelName(self, signedModule, signedLadder, layer): #in Phase1 it is assumed that there are only full modules
thePart = self.__GetPartStr(signedModule < 0, signedLadder < 0)
theSector = str(self.__GetBarrelSector(layer, signedLadder, signedModule))
return "BPix_B" + thePart + "_SEC" + theSector + "_LYR" + str(layer) + "_LDR" + str(abs(signedLadder)) + "F_MOD" + str(abs(signedModule))
def __BuildOnlineDiskName(self, signedDisk, signedBlade, panel, ring):
thePart = self.__GetPartStr(signedDisk < 0, signedBlade < 0)
return "FPix_B" + thePart + "_D" + str(abs(signedDisk)) + "_BLD" + str(abs(signedBlade)) + "_PNL" + str(panel) + "_RNG" + str(ring)
def __GroupHistograms(self):
currentGroupName = ""
groupOfHists = []
self.groupedHistograms = []
##### GROUP ALL LAYERS/RINGS HAVING SIMILAR INFORMATION
for obj in self.listOfNumHistograms:
objName = obj.GetName()
objNameSplit = objName.split("_")
objNameCollected = ''.join(objNameSplit[0:-1])
if objNameCollected != currentGroupName:
if len(groupOfHists):
self.groupedHistograms.append(groupOfHists)
groupOfHists = []
currentGroupName = objNameCollected
groupOfHists.append(obj)
self.groupedHistograms.append(groupOfHists) #the last group
def __AddNamedBins(self, geoFile, tX, tY, sX, sY, applyModuleRotation = False):
for line in geoFile:
lineSpl = line.strip().split("\"")
detId = lineSpl[0].split(" ")[0]
vertices = lineSpl[1]
xy = vertices.split(" ")
x, y = array('d'), array('d')
verNum = 1
for coord in xy:
coordSpl = coord.split(",")
if applyModuleRotation:
x.append(-(float(coordSpl[0]) * sX + tX))
y.append((float(coordSpl[1]) * sY + tY))
else:
x.append(float(coordSpl[0]) * sX + tX)
y.append(float(coordSpl[1]) * sY + tY)
verNum = verNum + 1
#close polygon
x.append(x[0])
y.append(y[0])
# print(detId, vertices)
# print(x)
# print(y)
if applyModuleRotation:
bin = TGraph(verNum, y, x)
else:
bin = TGraph(verNum, x, y)
# bin = TGraph(verNum, y, x) # rotation by 90 deg (so that it had the same layout as for the strips)
bin.SetName(detId)
self.__BaseTrackerMap.AddBin(bin)
def __CreateTrackerBaseMap(self):
self.__BaseTrackerMap = TH2Poly("Summary", "", -10, 160, -70, 70)
# self.__BaseTrackerMap = TH2Poly("Summary", "Tracker Map", 0, 0, 0, 0)
self.__BaseTrackerMap.SetFloat(1)
self.__BaseTrackerMap.GetXaxis().SetTitle("")
self.__BaseTrackerMap.GetYaxis().SetTitle("")
self.__BaseTrackerMap.SetOption("COLZ L")
self.__BaseTrackerMap.SetStats(0)
# BARREL FIRST
for i in range(maxPxBarrel):
with open(self.geometryFilenames[i], "r") as geoFile:
currBarrelTranslateX = 0
currBarrelTranslateY = barrelLadderShift[i]
self.__AddNamedBins(geoFile, currBarrelTranslateX, currBarrelTranslateY, 1, 1, True)
# break # debug only 1st layer
# MINUS FORWARD
for i in range(-maxPxForward, 0):
with open(self.geometryFilenames[maxPxBarrel + maxPxForward + i], "r") as geoFile:
currForwardTranslateX = forwardDiskXShift[-i - 1]
currForwardTranslateY = -forwardDiskYShift
self.__AddNamedBins(geoFile, currForwardTranslateX, currForwardTranslateY, 1, 1)
# PLUS FORWARD
for i in range(maxPxForward):
with open(self.geometryFilenames[maxPxBarrel + maxPxForward + i], "r") as geoFile:
currForwardTranslateX = forwardDiskXShift[i]
currForwardTranslateY = forwardDiskYShift
self.__AddNamedBins(geoFile, currForwardTranslateX, currForwardTranslateY, 1, 1)
# self.__BaseTrackerMap.Fill("305139728", 2)
print("Base Tracker Map: constructed")
############################################################################
def __init__(self, inputDQMName, outputDirName, minMaxFileName, limits, modDicName, runNumber, dirs, dirsAliases):
# def __init__(self, inputDQMName, outputDirName, minMaxFileName, limitsFileName, modDicName, runNumber, dirs, dirsAliases):
self.inputFileName = inputDQMName
self.outputDirName = outputDirName
self.minMaxFileName = minMaxFileName
# self.limitsFileName = limitsFileName
self.detIDsFileName = modDicName
self.limits = limits
self.runNumber = runNumber
self.dirs = dirs
self.dirsAliases = dirsAliases
self.inputFile = TFile(self.inputFileName)
self.listOfNumHistograms = []
self.availableNames = []
self.maxLadderToLayer = {6:1, 14:2, 22:3, 32:4}
self.maxBladeToRing = {11:1, 17:2}
self.geometryFilenames = []
for i in range(maxPxBarrel):
self.geometryFilenames.append(getFileInPath("DQM/SiStripMonitorClient/data/Geometry/vertices_barrel_" + str(i + 1)))
# self.geometryFilenames.append("DATA/Geometry/vertices_barrel_" + str(i + 1))
for i in range(-maxPxForward, maxPxForward + 1):
if i == 0:
continue #there is no 0 disk
self.geometryFilenames.append(getFileInPath("DQM/SiStripMonitorClient/data/Geometry/vertices_forward_" + str(i)))
# self.geometryFilenames.append("DATA/Geometry/vertices_forward_" + str(i))
self.internalData = {}
if self.inputFile.IsOpen():
print("%s opened successfully!" % (self.inputFileName))
# Get all needed histograms
for dir in self.dirs:
self.__TraverseDirTree(self.inputFile.Get(dir))
# print("Histograms to read %d" % (len(self.listOfNumHistograms)))
self.detDict = {}
with open(self.detIDsFileName, "r") as detIDs: # create dictionary online -> rawid
for entry in detIDs:
items = entry.replace("\n", " ").split(" ")
self.detDict.update({items[1] : int(items[0])})
# init internal data structure
self.internalData.update({int(items[0]) : {}})
self.rawToOnlineDict = dict((v,k) for k,v in six.iteritems(self.detDict))
self.__GroupHistograms()
self.__CreateTrackerBaseMap()
else:
print("Unable to open file %s" % (self.inputFileName))
### CREATE LIMITS DICTIONARY
self.limitsDic = {}
for y in limits:
lineSpl = y.strip().split(" ")
if len(lineSpl) < 5:
continue
currName = lineSpl[0]
zMin = float(lineSpl[1])
zMax = float(lineSpl[2])
isLog = False if lineSpl[3] == "0" else True
isAbs = False if lineSpl[4] == "0" else True
self.limitsDic.update({currName : {"zMin" : zMin, "zMax" : zMax, "isLog" : isLog, "isAbs" : isAbs}})
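# For example, the entry 'num_digis 0.01 90 1 0' above parses to
# {'num_digis': {'zMin': 0.01, 'zMax': 90.0, 'isLog': True, 'isAbs': False}}.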
# print limitsDic
def ReadHistograms(self):
if self.inputFile.IsOpen():
for group in self.groupedHistograms:
# name = ''.join(group[0].GetName().split("_")[0:-1])
if len(group) == 0:
return
print(group[0].GetName())
name = ''.join(group[0].GetName().split("_per_")[0])
self.availableNames.append(name)
# print(name)
for obj in group:
nbinsX = obj.GetNbinsX()
nbinsY = obj.GetNbinsY()
if nbinsX == 9: # BARREL
maxX = nbinsX // 2
maxY = nbinsY // 2
for x in range(-maxX, maxX + 1):
if x == 0:
continue
for y in range(-maxY, maxY + 1, 1):
if y == 0:
continue
onlineName = self.__BuildOnlineBarrelName(x, y, self.maxLadderToLayer[maxY])
self.internalData[self.detDict[onlineName]].update({name : obj.GetBinContent(x + maxX + 1, y + maxY + 1)})
elif nbinsX == 7: # FORWARD
maxX = nbinsX // 2
maxY = | |
night_sound = self._player.night_mode
speech_enhance = self._player.dialog_mode
is_radio_stream = \
current_media_uri.startswith('x-sonosapi-stream:') or \
current_media_uri.startswith('x-rincon-mp3radio:')
if is_playing_tv or is_playing_line_in:
# playing from line-in/tv.
support_previous_track = False
support_next_track = False
support_play = False
support_stop = True
support_pause = False
support_shuffle_set = False
if is_playing_tv:
media_artist = SUPPORT_SOURCE_TV
else:
media_artist = SUPPORT_SOURCE_LINEIN
source_name = media_artist
media_album_name = None
media_title = None
media_image_url = None
elif is_radio_stream:
media_image_url = self._format_media_image_url(
media_image_url,
current_media_uri
)
support_previous_track = False
support_next_track = False
support_play = True
support_stop = True
support_pause = False
support_shuffle_set = False
source_name = 'Radio'
# Check if currently playing radio station is in favorites
favc = [fav for fav in self._favorite_sources
if fav['uri'] == current_media_uri]
if len(favc) == 1:
src = favc.pop()
source_name = src['title']
# for radio streams we set the radio station name as the
# title.
if media_artist and media_title:
# artist and title are both in the data; concatenate
# them to display as the artist.
# "Information" field in the sonos pc app
media_artist = '{artist} - {title}'.format(
artist=media_artist,
title=media_title
)
else:
# "On Now" field in the sonos pc app
media_artist = self._media_radio_show
current_uri_metadata = media_info["CurrentURIMetaData"]
if current_uri_metadata not in ('', 'NOT_IMPLEMENTED', None):
# currently soco does not have an API for this
import soco
current_uri_metadata = soco.xml.XML.fromstring(
soco.utils.really_utf8(current_uri_metadata))
md_title = current_uri_metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
if md_title not in ('', 'NOT_IMPLEMENTED', None):
media_title = md_title
if media_artist and media_title:
# some radio stations put their name into the artist
# name, e.g.:
# media_title = "Station"
# media_artist = "Station - Artist - Title"
# detect this case and trim from the front of
# media_artist for cosmetics
str_to_trim = '{title} - '.format(
title=media_title
)
chars = min(len(media_artist), len(str_to_trim))
if media_artist[:chars].upper() == str_to_trim[:chars].upper():
media_artist = media_artist[chars:]
else:
# not a radio stream
media_image_url = self._format_media_image_url(
media_image_url,
track_info['uri']
)
support_previous_track = True
support_next_track = True
support_play = True
support_stop = True
support_pause = True
support_shuffle_set = True
position_info = self._player.avTransport.GetPositionInfo(
[('InstanceID', 0),
('Channel', 'Master')]
)
rel_time = _parse_timespan(
position_info.get("RelTime")
)
# player no longer reports position?
update_media_position = rel_time is None and \
self._media_position is not None
# player started reporting position?
update_media_position |= rel_time is not None and \
self._media_position is None
# position changed?
if rel_time is not None and self._media_position is not None:
time_diff = utcnow() - self._media_position_updated_at
time_diff = time_diff.total_seconds()
calculated_position = self._media_position + time_diff
update_media_position = \
abs(calculated_position - rel_time) > 1.5
if update_media_position and self.state == STATE_PLAYING:
media_position = rel_time
media_position_updated_at = utcnow()
else:
# don't update media_position (don't want unneeded
# state transitions)
media_position = self._media_position
media_position_updated_at = self._media_position_updated_at
playlist_position = track_info.get('playlist_position')
if playlist_position in ('', 'NOT_IMPLEMENTED', None):
playlist_position = None
else:
playlist_position = int(playlist_position)
playlist_size = media_info.get('NrTracks')
if playlist_size in ('', 'NOT_IMPLEMENTED', None):
playlist_size = None
else:
playlist_size = int(playlist_size)
if playlist_position is not None and playlist_size is not None:
if playlist_position <= 1:
support_previous_track = False
if playlist_position == playlist_size:
support_next_track = False
self._media_content_id = track_info.get('title')
self._media_duration = _parse_timespan(
track_info.get('duration')
)
self._media_position = media_position
self._media_position_updated_at = media_position_updated_at
self._media_image_url = media_image_url
self._media_artist = media_artist
self._media_album_name = media_album_name
self._media_title = media_title
self._current_track_uri = track_info['uri']
self._current_track_is_radio_stream = is_radio_stream
self._support_previous_track = support_previous_track
self._support_next_track = support_next_track
self._support_play = support_play
self._support_shuffle_set = support_shuffle_set
self._support_stop = support_stop
self._support_pause = support_pause
self._night_sound = night_sound
self._speech_enhance = speech_enhance
self._is_playing_tv = is_playing_tv
self._is_playing_line_in = is_playing_line_in
self._source_name = source_name
self._last_avtransport_event = None
def _format_media_image_url(self, url, fallback_uri):
if url in ('', 'NOT_IMPLEMENTED', None):
if fallback_uri in ('', 'NOT_IMPLEMENTED', None):
return None
if fallback_uri.find('tts_proxy') > 0:
# If the content is a tts don't try to fetch an image from it.
return None
return 'http://{host}:{port}/getaa?s=1&u={uri}'.format(
host=self._player.ip_address,
port=1400,
uri=urllib.parse.quote(fallback_uri)
)
return url
def process_sonos_event(self, event):
"""Process a service event coming from the speaker."""
next_track_image_url = None
if event.service == self._player.avTransport:
self._last_avtransport_event = event
self._media_radio_show = None
if self._current_track_is_radio_stream:
current_track_metadata = event.variables.get(
'current_track_meta_data'
)
if current_track_metadata:
self._media_radio_show = \
current_track_metadata.radio_show.split(',')[0]
next_track_uri = event.variables.get('next_track_uri')
if next_track_uri:
next_track_image_url = self._format_media_image_url(
None,
next_track_uri
)
elif event.service == self._player.renderingControl:
if 'volume' in event.variables:
self._player_volume = int(
event.variables['volume'].get('Master')
)
if 'mute' in event.variables:
self._player_volume_muted = \
event.variables['mute'].get('Master') == '1'
self.schedule_update_ha_state(True)
if next_track_image_url:
self.preload_media_image_url(next_track_image_url)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player_volume / 100.0
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player_volume_muted
@property
def shuffle(self):
"""Shuffling state."""
return True if self._player.play_mode == 'SHUFFLE' else False
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._coordinator:
return self._coordinator.media_content_id
return self._media_content_id
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._coordinator:
return self._coordinator.media_duration
return self._media_duration
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._coordinator:
return self._coordinator.media_position
return self._media_position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self._coordinator:
return self._coordinator.media_position_updated_at
return self._media_position_updated_at
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._coordinator:
return self._coordinator.media_image_url
return self._media_image_url
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_artist
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
if self._coordinator:
return self._coordinator.media_album_name
return self._media_album_name
@property
def media_title(self):
"""Title of current playing media."""
if self._coordinator:
return self._coordinator.media_title
return self._media_title
@property
def night_sound(self):
"""Get status of Night Sound."""
return self._night_sound
@property
def speech_enhance(self):
"""Get status of Speech Enhancement."""
return self._speech_enhance
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._coordinator:
return self._coordinator.supported_features
supported = SUPPORT_SONOS
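# SUPPORT_SONOS is assumed to contain every optional flag checked below, so
# XOR-ing a flag is equivalent to clearing that bit when it is known to be set.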
if not self._support_previous_track:
supported = supported ^ SUPPORT_PREVIOUS_TRACK
if not self._support_next_track:
supported = supported ^ SUPPORT_NEXT_TRACK
if not self._support_play:
supported = supported ^ SUPPORT_PLAY
if not self._support_shuffle_set:
supported = supported ^ SUPPORT_SHUFFLE_SET
if not self._support_stop:
supported = supported ^ SUPPORT_STOP
if not self._support_pause:
supported = supported ^ SUPPORT_PAUSE
return supported
@soco_error()
def volume_up(self):
"""Volume up media player."""
self._player.volume += self.volume_increment
@soco_error()
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self.volume_increment
@soco_error()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._player.volume = str(int(volume * 100))
@soco_error()
def set_shuffle(self, shuffle):
"""Enable/Disable shuffle mode."""
self._player.play_mode = 'SHUFFLE' if shuffle else 'NORMAL'
@soco_error()
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._player.mute = mute
@soco_error()
@soco_coordinator
def select_source(self, source):
"""Select input source."""
if source == SUPPORT_SOURCE_LINEIN:
self._source_name = SUPPORT_SOURCE_LINEIN
self._player.switch_to_line_in()
elif source == SUPPORT_SOURCE_TV:
self._source_name = SUPPORT_SOURCE_TV
self._player.switch_to_tv()
else:
fav = [fav for fav in self._favorite_sources
if fav['title'] == source]
if len(fav) == 1:
src = fav.pop()
self._source_name = src['title']
if ('object.container.playlistContainer' in src['meta'] or
'object.container.album.musicAlbum' in src['meta']):
self._replace_queue_with_playlist(src)
self._player.play_from_queue(0)
else:
self._player.play_uri(src['uri'], src['meta'],
src['title'])
def _replace_queue_with_playlist(self, src):
"""Replace queue with playlist represented by src.
Playlists can't be played directly with the self._player.play_uri
API as they are actually composed of multiple URLs. Until soco has
support for playing a playlist, we'll need to parse the playlist item
and replace the current queue in order to play it.
"""
import soco
import xml.etree.ElementTree as ET
root = ET.fromstring(src['meta'])
namespaces = {'item':
'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/',
'desc': 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/'}
desc = root.find('item:item', namespaces).find('desc:desc',
namespaces).text
res = [soco.data_structures.DidlResource(uri=src['uri'],
protocol_info="DUMMY")]
didl = soco.data_structures.DidlItem(title="DUMMY",
parent_id="DUMMY",
item_id=src['uri'],
desc=desc,
resources=res)
self._player.stop()
self._player.clear_queue()
self._player.add_to_queue(didl)
@property
def source_list(self):
"""List of available input sources."""
if self._coordinator:
return self._coordinator.source_list
model_name = self._speaker_info['model_name']
sources = []
if self._favorite_sources:
for fav in self._favorite_sources:
sources.append(fav['title'])
if 'PLAY:5' in model_name:
sources += [SUPPORT_SOURCE_LINEIN]
elif 'PLAYBAR' in model_name:
sources += [SUPPORT_SOURCE_LINEIN, SUPPORT_SOURCE_TV]
return sources
@property
def source(self):
"""Name of the current input source."""
if self._coordinator:
return self._coordinator.source
return self._source_name
@soco_error()
def turn_off(self):
"""Turn off media player."""
if self._support_stop:
self.media_stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_play(self):
"""Send play command."""
self._player.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_stop(self):
"""Send stop command."""
self._player.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_pause(self):
"""Send pause command."""
self._player.pause()
@soco_error()
@soco_coordinator
def media_next_track(self):
"""Send next track command."""
self._player.next()
@soco_error()
@soco_coordinator
def media_previous_track(self):
"""Send next track command."""
self._player.previous()
@soco_error()
@soco_coordinator
def media_seek(self, position):
"""Send seek command."""
self._player.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
@soco_coordinator
def clear_playlist(self):
"""Clear players playlist."""
self._player.clear_queue()
@soco_error()
def turn_on(self):
Turn the media player on.
While 'I' is less than the number of instances of behavior:
file.write(self.name) # Write the event name.
file.write(",")
file.write(str(I+1)) # Write the event instance.
file.write(",")
file.write(str(self.onset[I])) # Write the time measurement.
file.write(",")
file.write(str(self.offset[I])) # Write the data value.
file.write(",")
file.write(str(self.IEI[I])) # Write the inter-event interval.
file.write(",")
file.write(str(self.totaloccurrences)) # Write the total occurrences.
file.write("\n") # Start a new line.
I=I+1 # Increase 'I'.
file.close()
##############################################################################################################################################################################################
##############################
## Functions ##
##############################
##############################################################################################################################################################################################
def getinput(string,error,kind):
# This method allows the user to input data from the terminal.
# It uses a different version of the input code depending on the version of Python.
# This is intended to make the code compatible with newer and older versions.
# It also ensures the input is the correct type.
x=False
while x==False:
if sys.hexversion > 0x03000000:
response=input(string)
else:
response=raw_input(string)
try:
response=kind(response)
except:
print(error)
x=isinstance(response,kind)
return response
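# Usage sketch (illustrative): the prompt is repeated until the reply can be cast
# to the requested type, e.g.
#   count = getinput('Number of events: ', 'Please enter a number.', int)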
def newline():
# This method prints a new line to the terminal.
# It uses a different version of the input code depending on the version of Python.
# This is intended to make the code compatible with newer and older versions.
if sys.hexversion > 0x03000000:
print()
else:
print("\n")
def empty(list):
# This method allows a function to empty a list without confusing Python's variable scope.
# If list=[] were used inside a function instead, it would only rebind the local name and leave the caller's list unchanged.
while len(list)>0:
list.pop(0)
def preparecustomdataoutput(datafile):
# This method creates and prepares the data output file.
# Note that it is not indented, and is not a function of the class "event."
# Instead it is a general function.
file=open(datafile,"w")
file.write("Event,Instance,Onset,Offset,Duration,Inter-Event Interval,Total Duration,Total Occurrences,")
file.write("\n")
file.close()
def preparecustomdataoutputforrawdata(datafile):
# Adds headings for raw data to the end of a previously prepared data file.
try:
file=open(datafile,"r")
file.close()
mode="a"
except:
mode="w"
file=open(datafile,mode) # Opens an existing file for writing. The next line writes the column headings.
if mode=="a":
file.write("\n")
file.write("Event,Instance,Time,Data,Inter-Event Interval,Total Occurrences,")
file.write("\n")
file.close()
def prepareseparatecustomrawdataoutput(datafile):
# Creates a separate file for raw data measurements.
# Unused.
file=open(datafile,"a") # Opens an existing file for writing. The next line writes the column headings.
file.write("\n")
file.write("Event,Instance,Time,Data,Total Occurrences,")
file.write("\n")
file.close()
def sorteventlist():
# Sorts the eventlist so that any rawdata events are at the end of the list
sortlist=[]
for unit in eventlist: # Add stuff to the sortlist.
sortlist.append(unit)
while len(eventlist)>0: # Remove it from the eventlist.
eventlist.pop(0)
for unit in sortlist: # Add non-rawdata back to eventlist first.
if unit.rawdata==0:
eventlist.append(unit)
for unit in sortlist: # Now add the raw data to the end of the list.
if unit.rawdata==1:
eventlist.append(unit)
def manualinput():
# This function gets all the needed information directly from the user at the terminal.
print("Memory recovery: User input mode.\n")
print("What is the name of the memory file?")
print("Make sure the file is in the same folder as the python script.")
memoryfound=0
while memoryfound==0:
mastermemoryfile=getinput("Memory file: ","",str)
try:
file=open(mastermemoryfile,"r")
memoryfound=1
file.close()
except:
print("Memory file not found.")
newline()
print("How many events do you want to save?")
numberofevents=getinput("Number of events: ","Please enter a number.",int)
newline()
if numberofevents>1:
print("Do you want to save all the events in one common data file?")
usemasterdata=getinput("Please type 'yes' or 'no': ","",str).lower()
newline()
if usemasterdata=="yes":
print("What datafile do you want to use to save all the events?")
print("The datafile will be created in the same folder as the python script.")
print("Save the file as ___.csv to easily read the data in Excel.")
masterdatafile=getinput("Data file: ","",str)
newline()
else:
print("What datafile do you want to use to save the event?")
print("The datafile will be created in the same folder as the python script.")
print("Save the file as ___.csv to easily read the data in Excel.")
masterdatafile=getinput("Data file: ","",str)
usemasterdata="yes"
newline()
while numberofevents > 0:
eventlist.append(event())
numberofevents=numberofevents-1
print("Please provide information for each event.")
for unit in eventlist:
unit.memoryfile=mastermemoryfile
numberofevents=numberofevents+1
print("Event "+(str(numberofevents)))
newline()
unit.name=getinput("Event name: ","",str)
newline()
if usemasterdata=="no":
print("What datafile do you want to use to save the event?")
print("Save the file as ___.csv to easily read the data in Excel.")
unit.datafile=getinput("Data file: ","",str)
newline()
else:
unit.datafile=masterdatafile
print("In what order was the event originally saved in the experimental program?")
print("Please provide a number such as 1 or 2.")
order=getinput("Event order: ","Please enter a number.",int)
unit.ID=chr(IDcode[order-1])
newline()
print("Was this event raw data?")
if getinput("Please type 'yes' or 'no': ","",str).lower()=="yes":
unit.rawdata=1
else:
unit.rawdata=0
unit.readmemory()
unit.processdata()
if unit.rawdata==0:
print("\nDo you want to debounce the data before saving?")
usedebounce=getinput("Please type 'yes' or 'no': ","",str).lower()
newline()
if usedebounce == "yes":
unit.debounce=1
print("What time interval, in milliseconds, do you want to used to debounce the data?")
print("The default value is 25 milliseconds")
unit.bounce=getinput("Debounce interval: ","Please enter a number.",float)
unit.bounce=unit.bounce/1000.0
newline()
unit.debouncedata()
unit.processdata()
print("Data reduced to "+str(unit.totaloccurrences)+" occurrences of "+unit.name+" with a total duration of "+str(unit.totalduration)+".")
newline()
print("All information collected.")
newline()
savedata()
empty(eventlist)
def fileinput():
# Allows the use of an input settings file to quickly process multiple files.
print("Memory recovery: File input mode.\n")
print("What is the name of the input settings file?")
print("Make sure the file is in the same folder as the python script.")
settingsfound=0
while settingsfound==0:
settingsfile=getinput("Settings file: ","",str)
try:
readsettings(settingsfile)
settingsfound=1
except:
print("Settings file not found.")
memoryfile=0
datafile=0
morefiles="yes"
while morefiles=="yes":
print("What is the name of the memory file?")
memoryfound=0
while memoryfound==0:
memoryfile=getinput("Memory file: ","",str)
try:
file=open(memoryfile,"r")
memoryfound=1
file.close()
except:
print("Memory file not found.")
newline()
print("What datafile do you want to use to save the event?")
print("Save the file as ___.csv to easily read the data in Excel.")
datafile=getinput("Data file: ","",str)
newline()
for unit in eventlist:
unit.memoryfile=memoryfile
unit.datafile=datafile
unit.readmemory()
unit.processdata()
if unit.debounce==1:
unit.debouncedata()
unit.processdata()
savedata()
print("Do you want to recover more memory files with these settings?")
morefiles=getinput("Please type 'yes' or 'no': ","",str)
morefiles=morefiles.lower()
newline()
empty(eventlist)
print("Complete.\n")
def directfileinput(settingsfile,memoryfile,datafile):
# Allows the use of an input settings file to quickly process multiple files.
# Information is entered as a parameter instead of intered in the python shell.
readsettings(settingsfile)
for unit in eventlist:
unit.memoryfile=memoryfile
unit.datafile=datafile
unit.readmemory()
unit.processdata()
if unit.debounce==1:
unit.debouncedata()
unit.processdata()
savedata()
empty(eventlist)
print("Complete.\n")
def readsettings(settingsfile):
file=open(settingsfile,"r")
file.readline()
file.readline()
file.readline()
file.readline()
file.readline()
read=file.readline() # Number of events
read=int(read[0:-1])
while read > 0:
eventlist.append(event())
read=read-1
file.readline()
for unit in eventlist: # Event Names
read=file.readline()
unit.name=read[0:-1]
file.readline()
for unit in eventlist: # Event IDs
read=file.readline()
read=int(read[0:-1])
unit.ID=chr(IDcode[read-1])
file.readline()
for unit in eventlist: # Raw data
read=file.readline()
read=(read[0:-1]).lower()
if read=='yes':
unit.rawdata=1
file.readline()
for unit in eventlist: # Debounce
read=file.readline()
read=(read[0:-1]).lower()
if read=='yes':
unit.debounce=1
file.readline() # Debounce interval
for unit in eventlist:
if unit.debounce==1:
read=file.readline()
unit.bounce=float(read[0:-1])
unit.bounce=unit.bounce/1000.0
file.close()
print("Settings imported.\n")
def savedata():
# This function saves each event to the datafile.
sorteventlist()
rawdataprepared=0
for unit in eventlist:
if unit == eventlist[0]:
if unit.rawdata==1:
preparecustomdataoutputforrawdata(unit.datafile)
else:
preparecustomdataoutput(unit.datafile)
else:
try:
file=open(unit.datafile,"r")
file.close()
except:
preparecustomdataoutput(unit.datafile)
if unit.rawdata==1:
if rawdataprepared==0:
preparecustomdataoutputforrawdata(unit.datafile)
rawdataprepared=1
unit.printrawdata()
else:
unit.printdata()
print(unit.name+" data saved.")
newline()
print("All data saved.")
###########################################################################################################################
##############################
## Variables ##
##############################
###########################################################################################################################
# This list will hold as many events as needed.
# It is used to hold all the events together in a group so that they can be processed quickly in sequence.
# It is also used to create events without explicitly giving them a name.
eventlist=[]
# These IDcodes are assigned to each event on creation, and used to quickly write information to the memory file.
# Later they are used to quickly sort through the memory file to read and process each event.
IDcode=[33, 34, 35, 36, 37, 38, 39, 42, 43, 45, 47, 58, 59, 60, 61, 62, 63, 64, 65, 66,
67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104,105,106,
107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,
128,129,130,131,132,133,134,135,136,137,138,140,141,142,143,144,145,146,147,148,
149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,
169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,
189,190,191,192,193,194,195,196,197,198,199,200,201,203,204,205,206,207,208,209,
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,228,229,230,231,
232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,
252,253,254]
##############################################################################################################################################################################################
#
# ┌────────────────────────────────────────────────────────────────────────┐
# │ TERMS OF USE: MIT License │
# ├────────────────────────────────────────────────────────────────────────┤
# │Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation │
# │files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, │
# │modify, merge, | |
debug=kwargs.get(
'debug', 1), cache=kwargs.get('cache', 'False'))
elif solvename == 'newton':
solveout = partial(self.outsolve2dcunk, databank, chunk=chunk,
ljit=ljit, debug=kwargs.get('debug', 1), type='res')
elif solvename == 'res':
solveout = partial(self.outsolve2dcunk, databank, chunk=chunk,
ljit=ljit, debug=kwargs.get('debug', 1), type='res')
if not silent:
print(f'Create compiled solving function for {self.name}')
print(f'{ljit=} {stringjit=} {transpile_reset=} {hasattr(self, f"pro_{jitname}")=}')
if ljit:
if newdata or transpile_reset or not hasattr(self, f'pro_{jitname}'):
if stringjit:
if not silent:
print(f'now makelos makes a {solvename} jit function')
self.make_los_text_jit = solveout()
# creates the los function
exec(self.make_los_text_jit, globals())
pro_jit, core_jit, epi_jit = make_los(
self.funks, self.errfunk)
else:
# breakpoint()
# if we import from a cache, we assume that the dataframe is in the same order
if transpile_reset or not hasattr(self, f'pro_{jitname}'):
jitfilename= f'modelsource/{jitname}_jitsolver.py'.replace(' ','_')
jitfile = Path(jitfilename)
jitfile.parent.mkdir(parents=True, exist_ok=True)
if not silent:
print(f'{transpile_reset=} {hasattr(self, f"pro_{jitname}")=} {jitfile.is_file()=}')
initfile = jitfile.parent /'__init__.py'
if not initfile.exists():
with open(initfile,'wt') as i:
i.write('#')
if transpile_reset or not jitfile.is_file():
solvetext0 = solveout()
solvetext = '\n'.join(
[l[4:] for l in solvetext0.split('\n')[1:-2]])
solvetext = solvetext.replace(
'cache=False', 'cache=True')
with open(jitfile, 'wt') as f:
f.write(solvetext)
importlib.invalidate_caches()
if not silent:
print(f'Writes the evaluation functon to {jitfile.is_file()=}')
if not silent:
print(f'Importing {jitfile}')
m1 = importlib.import_module('.'+jitfile.stem,jitfile.parent.name)
pro_jit, core_jit, epi_jit = m1.prolog, m1.core, m1.epilog
setattr(self, f'pro_{jitname}', pro_jit)
setattr(self, f'core_{jitname}', core_jit)
setattr(self, f'epi_{jitname}', epi_jit)
return getattr(self, f'pro_{jitname}'), getattr(self, f'core_{jitname}'), getattr(self, f'epi_{jitname}')
else:
if newdata or transpile_reset or not hasattr(self, f'pro_{nojitname}'):
if not silent:
print(f'now makelos makes a {solvename} solvefunction')
make_los_text = solveout()
self.make_los_text = make_los_text
exec(make_los_text, globals()) # creates the los function
pro, core, epi = make_los(self.funks, self.errfunk)
setattr(self, f'pro_{nojitname}', pro)
setattr(self, f'core_{nojitname}', core)
setattr(self, f'epi_{nojitname}', epi)
return getattr(self, f'pro_{nojitname}'), getattr(self, f'core_{nojitname}'), getattr(self, f'epi_{nojitname}')
def is_newdata(self, databank):
'''Determines if this is the same databank as in the previous solution '''
if not self.eqcolumns(self.genrcolumns, databank.columns):
# fill all Missing value with 0.0
databank = insertModelVar(databank, self)
for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
# Make sure columns with matrixes are of this type
databank.loc[:, i] = databank.loc[:, i].astype('O')
newdata = True
else:
newdata = False
return newdata, databank
def sim(self, databank, start='', slut='', silent=1, samedata=0, alfa=1.0, stats=False, first_test=5,
max_iterations=100, conv='*', absconv=0.01, relconv=DEFAULT_relconv,
stringjit=True, transpile_reset=False,
dumpvar='*', init=False, ldumpvar=False, dumpwith=15, dumpdecimal=5, chunk=30, ljit=False, timeon=False,
fairopt={'fair_max_iterations ': 1}, progressbar=False,**kwargs):
'''Evaluates this model on a databank from start to slut (means end in Danish).
First it finds the values in the Dataframe, then creates the evaluater function through the *outeval* function
(:func:`modelclass.model.fouteval`)
then it evaluates the function and returns the values to a the Dataframe in the databank.
The text for the evaluater function is placed in the model property **make_los_text**
where it can be inspected
in case of problems.
'''
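        # Usage sketch (illustrative; names and periods are hypothetical):
        #   result_df = mmodel.sim(databank, start=2021, slut=2030, alfa=0.7)
        # The solved values are returned in a new DataFrame; the input databank itself
        # is not modified, since the solver works on a copy of its values.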
starttimesetup = time.time()
fair_max_iterations = {**fairopt, **
kwargs}.get('fair_max_iterations ', 1)
sol_periode = self.smpl(start, slut, databank)
# breakpoint()
self.check_sim_smpl(databank)
if not silent:
print('Will start solving: ' + self.name)
# if not self.eqcolumns(self.genrcolumns,databank.columns):
# databank=insertModelVar(databank,self) # fill all Missing value with 0.0
# for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
# databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type
# newdata = True
# else:
# newdata = False
newdata, databank = self.is_newdata(databank)
self.pro2d, self.solve2d, self.epi2d = self.makelos(
databank, solvename='sim', ljit=ljit, stringjit=stringjit, transpile_reset=transpile_reset,
chunk=chunk, newdata=newdata,silent=silent)
values = databank.values.copy() #
self.genrcolumns = databank.columns.copy()
self.genrindex = databank.index.copy()
# convvar = [conv.upper()] if isinstance(conv,str) else [c.upper() for c in conv] if conv != [] else list(self.endogene)
convvar = self.list_names(self.coreorder, conv)
# this is how convergence is measured
convplace = [databank.columns.get_loc(c) for c in convvar]
convergence = True
endoplace = [databank.columns.get_loc(c) for c in list(self.endogene)]
# breakpoint()
if ldumpvar:
self.dumplist = []
self.dump = self.list_names(self.coreorder, dumpvar)
dumpplac = [databank.columns.get_loc(v) for v in self.dump]
ittotal = 0
endtimesetup = time.time()
starttime = time.time()
bars = '{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt}'
for fairiteration in range(fair_max_iterations):
if fair_max_iterations >= 2:
print(f'Fair-Taylor iteration: {fairiteration}')
with tqdm(total=len(sol_periode),disable = not progressbar,desc=f'Solving {self.name}',bar_format=bars) as pbar:
for self.periode in sol_periode:
row = databank.index.get_loc(self.periode)
if ldumpvar:
self.dumplist.append([fairiteration, self.periode, int(0)]+[values[row, p]
for p in dumpplac])
if init:
for c in endoplace:
values[row, c] = values[row-1, c]
itbefore = values[row, convplace]
self.pro2d(values, values, row, 1.0)
for iteration in range(max_iterations):
with self.timer(f'Evaluate {self.periode}/{iteration} ', timeon) as t:
self.solve2d(values, values, row, alfa)
ittotal += 1
if ldumpvar:
self.dumplist.append([fairiteration, self.periode, int(iteration+1)]+[values[row, p]
for p in dumpplac])
if iteration > first_test:
itafter = values[row, convplace]
# breakpoint()
select = absconv <= np.abs(itbefore)
convergence = (
np.abs((itafter-itbefore)[select])/np.abs(itbefore[select]) <= relconv).all()
if convergence:
if not silent:
print(
f'{self.periode} Solved in {iteration} iterations')
break
itbefore = itafter
else:
print(f'{self.periode} not converged in {iteration} iterations')
self.epi2d(values, values, row, 1.0)
pbar.update()
endtime = time.time()
if ldumpvar:
self.dumpdf = pd.DataFrame(self.dumplist)
del self.dumplist
self.dumpdf.columns = ['fair', 'per', 'iteration']+self.dump
if fair_max_iterations <= 2:
self.dumpdf.drop('fair', axis=1, inplace=True)
outdf = pd.DataFrame(values, index=databank.index,
columns=databank.columns)
if stats:
self.simtime = endtime-starttime
self.setuptime = endtimesetup - starttimesetup
numberfloats = (self.flop_get['core'][-1][1]*ittotal+
len(sol_periode)*(self.flop_get['prolog'][-1][1]+self.flop_get['epilog'][-1][1]))
print(
f'Setup time (seconds) :{self.setuptime:>15,.2f}')
print(
f'Floating point operations core :{self.flop_get["core"][-1][1]:>15,}')
print(
f'Floating point operations prolog :{self.flop_get["prolog"][-1][1]:>15,}')
print(
f'Floating point operations epilog :{self.flop_get["epilog"][-1][1]:>15,}')
print(f'Simulation period :{len(sol_periode):>15,}')
print(f'Total iterations :{ittotal:>15,}')
print(f'Total floating point operations :{numberfloats:>15,}')
print(
f'Simulation time (seconds) :{self.simtime:>15,.2f}')
if self.simtime > 0.0:
print(
f'Floating point operations per second :{numberfloats/self.simtime:>15,.1f}')
if not silent:
print(self.name + ' solved ')
return outdf
@staticmethod
def grouper(iterable, n, fillvalue=''):
"Collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def outsolve2dcunk(self, databank,
debug=1, chunk=None,
ljit=False, type='gauss', cache=False):
''' takes a list of terms and translates them to an evaluator function called los
The model accesses the data through DataFrame.values[rowindex+lag, columnindex], which is very efficient
'''
short, long, longer = 4*' ', 8*' ', 12 * ' '
columnsnr = self.get_columnsnr(databank)
if ljit:
thisdebug = False
else:
thisdebug = debug
#print(f'Generating source for {self.name} using ljit = {ljit} ')
def make_gaussline2(vx, nodamp=False):
            ''' Takes a list of terms and translates them to a line in a Gauss-Seidel solver for
            simultaneous models.
            New version to handle several left-hand-side (lhs) variables. Damping is not allowed for
            this, but could easily be implemented by making a function to multiply tuples.
            nodamp is for prolog and epilog solutions, which should not be damped.
            '''
termer = self.allvar[vx]['terms']
assigpos = self.allvar[vx]['assigpos']
if nodamp:
ldamp = False
else:
# convention for damping equations
if pt.kw_frml_name(self.allvar[vx]['frmlname'], 'DAMP') or 'Z' in self.allvar[vx]['frmlname']:
assert assigpos == 1, 'You can not dampen equations with several left hand sides:'+vx
endovar = [t.op if t.op else (
'values[row,'+str(columnsnr[t.var])+']') for j, t in enumerate(termer) if j <= assigpos-1]
                # to implement damping of the solution
damp = '(1-alfa)*('+''.join(endovar)+')+alfa*('
ldamp = True
else:
ldamp = False
out = []
for i, t in enumerate(termer[:-1]): # drop the trailing $
if t.op:
out.append(t.op.lower())
if i == assigpos and ldamp:
out.append(damp)
if t.number:
out.append(t.number)
elif t.var:
if i > assigpos:
out.append(
'values[row'+t.lag+','+str(columnsnr[t.var])+']')
else:
out.append(
'values[row'+t.lag+','+str(columnsnr[t.var])+']')
if ldamp:
out.append(')') # the last ) in the dampening
res = ''.join(out)
return res+'\n'
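        # Illustrative sketch of the damped Gauss-Seidel line this helper generates
        # (the variable and the column number 7 are made up for the example):
        #   values[row,7]=(1-alfa)*(values[row,7])+alfa*(<right-hand-side terms>)
        # so alfa=1.0 gives a pure Gauss-Seidel update and smaller alfa damps the update.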
def make_resline2(vx, nodamp):
            ''' Takes a list of terms and translates them to a line that evaluates the equation,
            writing the left-hand side into outvalues instead of updating values in place (the "res" variant).
            '''
termer = self.allvar[vx]['terms']
assigpos = self.allvar[vx]['assigpos']
out = []
for i, t in enumerate(termer[:-1]): # drop the trailing $
if t.op:
out.append(t.op.lower())
if t.number:
out.append(t.number)
elif t.var:
lag = int(t.lag) if t.lag else 0
if i < assigpos:
out.append(
'outvalues[row'+t.lag+','+str(columnsnr[t.var])+']')
else:
out.append(
'values[row'+t.lag+','+str(columnsnr[t.var])+']')
res = ''.join(out)
return res+'\n'
def makeafunk(name, order, linemake, chunknumber, debug=False, overhead=0, oldeqs=0, nodamp=False, ljit=False, totalchunk=1):
            ''' Creates the source of an evaluation function.
            Keeps track of how many equations and lines are in the functions above.
            This allows the error function to retrieve the variable for which a math error is thrown.
            '''
fib1 = []
fib2 = []
if ljit:
# fib1.append((short+'print("'+f"Compiling chunk {chunknumber+1}/{totalchunk} "+'",time.strftime("%H:%M:%S")) \n') if ljit else '')
fib1.append(
short+'@jit("(f8[:,:],f8[:,:],i8,f8)",fastmath=True,cache=False)\n')
fib1.append(short + 'def '+name +
'(values,outvalues,row,alfa=1.0):\n')
# fib1.append(long + 'outvalues = values | |
# -*- coding: utf-8 -*-
import xbmc, xbmcgui, xbmcplugin, xbmcaddon, urllib2, urllib, re, string, sys, os, zlib
from uuid import uuid4
from random import random,randint
from math import floor
import hashlib
import time
import simplejson
# Plugin constants
__addonname__ = "奇艺视频(QIYI)"
__addonid__ = "plugin.video.qiyi"
__addon__ = xbmcaddon.Addon(id=__addonid__)
CHANNEL_LIST = [['电影','1'], ['电视剧','2'], ['纪录片','3'], ['动漫','4'], ['音乐','5'], ['综艺','6'], ['娱乐','7'], ['旅游','9'], ['片花','10'], ['教育','12'], ['时尚','13']]
ORDER_LIST = [['4','更新时间'], ['11','热门']]
PAYTYPE_LIST = [['','全部影片'], ['0','免费影片'], ['1','会员免费'], ['2','付费点播']]
def GetHttpData(url):
print "getHttpData: " + url
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)')
try:
response = urllib2.urlopen(req)
httpdata = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
httpdata = zlib.decompress(httpdata, zlib.MAX_WBITS|32)
charset = response.headers.getparam('charset')
response.close()
except:
xbmc.log( "%s: %s (%d) [%s]" % (
__addonname__,
sys.exc_info()[ 2 ].tb_frame.f_code.co_name,
sys.exc_info()[ 2 ].tb_lineno,
sys.exc_info()[ 1 ]
), level=xbmc.LOGERROR)
return ''
match = re.compile('<meta http-equiv=["]?[Cc]ontent-[Tt]ype["]? content="text/html;[\s]?charset=(.+?)"').findall(httpdata)
if len(match)>0:
charset = match[0]
if charset:
charset = charset.lower()
if (charset != 'utf-8') and (charset != 'utf8'):
httpdata = httpdata.decode(charset, 'ignore').encode('utf8', 'ignore')
return httpdata
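# Illustrative usage sketch (the URL is made up): GetHttpData fetches a page,
# transparently inflating gzip responses and re-encoding non-UTF-8 pages to UTF-8,
# returning '' on any request error.
#   html = GetHttpData('http://example.com/somepage.html')
#   if html: pass  # parse with the regular expressions used below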
def urlExists(url):
try:
resp = urllib2.urlopen(url)
result = True
resp.close()
except:
result = False
return result
def searchDict(dlist,idx):
for i in range(0,len(dlist)):
if dlist[i][0] == idx:
return dlist[i][1]
return ''
def getcatList(listpage, id, cat):
    # Header used per channel: 类型 (movies, documentary, animation, entertainment, travel), 分类 (TV series, variety, trailers), 流派 (music), 一级分类 (education), 行业 (fashion)
match = re.compile('<h3>(类型|分类|流派|一级分类|行业):</h3>(.*?)</ul>', re.DOTALL).findall(listpage)
    if id in ('3','9'): # documentary & travel
catlist = re.compile('/www/' + id + '/(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
elif id in ('5','10'): # 音乐&片花
catlist = re.compile('/www/' + id + '/\d*-\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    elif id == '12': # education
catlist = re.compile('/www/' + id + '/\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
    elif id == '13': # fashion
catlist = re.compile('/www/' + id + '/\d*-\d*-\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
else:
catlist = re.compile('/www/' + id + '/\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0][1])
match1 = re.compile('<a href="#">(.*?)</a>').search(match[0][1])
if match1:
catlist.append((cat, match1.group(1)))
return catlist
def getareaList(listpage, id, area):
match = re.compile('<h3>地区:</h3>(.*?)</ul>', re.DOTALL).findall(listpage)
    if id == '7': # entertainment
arealist = re.compile('/www/' + id + '/\d*-\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0])
    elif id in ('9','10'): # travel & trailers
arealist = re.compile('/www/' + id + '/\d*-(\d*)-[^>]+>(.*?)</a>').findall(match[0])
else:
arealist = re.compile('/www/' + id + '/(\d*)-[^>]+>(.*?)</a>').findall(match[0])
match1 = re.compile('<a href="#">(.*?)</a>').search(match[0])
if match1:
arealist.append((area, match1.group(1)))
return arealist
def getyearList(listpage, id, year):
match = re.compile('<h3>年代:</h3>(.*?)</ul>', re.DOTALL).findall(listpage)
yearlist = re.compile('/www/' + id + '/\d*-\d*---------\d*-([\d_]*)-[^>]+>(.*?)</a>').findall(match[0])
match1 = re.compile('<a href="#">(.*?)</a>').search(match[0])
if match1:
yearlist.append((year, match1.group(1)))
return yearlist
def rootList():
for name, id in CHANNEL_LIST:
li = xbmcgui.ListItem(name)
u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus("")+"&area="+urllib.quote_plus("")+"&year="+urllib.quote_plus("")+"&order="+urllib.quote_plus("11")+"&page="+urllib.quote_plus("1")+"&paytype="+urllib.quote_plus("0")
xbmcplugin.addDirectoryItem(int(sys.argv[1]),u,li,True)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
# id c1 c2 c3 c4 c5 c11 c12 c14
# movies 1 area cat paytype year order
# TV series 2 area cat paytype year order
# documentary 3 cat paytype order
# animation 4 area cat ver age paytype order
# music 5 area lang cat grp paytype order
# variety 6 area cat paytype order
# entertainment 7 cat area paytype order
# travel 9 cat area paytype order
# trailers 10 area cat paytype order
# education 12 cat paytype order
# fashion 13 cat paytype order
def progList(name,id,page,cat,area,year,order,paytype):
c1 = ''
c2 = ''
c3 = ''
c4 = ''
    if id == '7': # entertainment
c3 = area
    elif id in ('9','10'): # travel & trailers
c2 = area
    elif id != '3': # not documentary
c1 = area
    if id in ('3','9'): # documentary & travel
c1 = cat
    elif id in ('5','10'): # music & trailers
c4 = cat
    elif id == '12': # education
c3 = cat
    elif id == '13': # fashion
c5 = cat
else:
c2 = cat
url = 'http://list.iqiyi.com/www/' + id + '/' + c1 + '-' + c2 + '-' + c3 + '-' + c4 + '-------' +\
paytype + '-' + year + '--' + order + '-' + page + '-1-iqiyi--.html'
currpage = int(page)
link = GetHttpData(url)
match1 = re.compile('data-key="([0-9]+)"').findall(link)
if len(match1) == 0:
totalpages = 1
else:
totalpages = int(match1[len(match1) - 1])
match = re.compile('<!-- 分类 -->(.+?)<!-- 分类 end-->', re.DOTALL).findall(link)
if match:
listpage = match[0]
else:
listpage = ''
match = re.compile('<div class="wrapper-piclist"(.+?)<!-- 页码 开始 -->', re.DOTALL).findall(link)
if match:
match = re.compile('<li[^>]*>(.+?)</li>', re.DOTALL).findall(match[0])
totalItems = len(match) + 1
if currpage > 1: totalItems = totalItems + 1
if currpage < totalpages: totalItems = totalItems + 1
if cat == '':
catstr = '全部类型'
else:
catlist = getcatList(listpage, id, cat)
catstr = searchDict(catlist, cat)
selstr = '[COLOR FFFF0000]' + catstr + '[/COLOR]'
if not (id in ('3','12','13')):
if area == '':
areastr = '全部地区'
else:
arealist = getareaList(listpage, id, area)
areastr = searchDict(arealist, area)
selstr += '/[COLOR FF00FF00]' + areastr + '[/COLOR]'
if id in ('1', '2'):
if year == '':
yearstr = '全部年份'
else:
yearlist = getyearList(listpage, id, year)
yearstr = searchDict(yearlist, year)
selstr += '/[COLOR FFFFFF00]' + yearstr + '[/COLOR]'
selstr += '/[COLOR FF00FFFF]' + searchDict(ORDER_LIST, order) + '[/COLOR]'
selstr += '/[COLOR FFFF00FF]' + searchDict(PAYTYPE_LIST, paytype) + '[/COLOR]'
li = xbmcgui.ListItem(name+'(第'+str(currpage)+'/'+str(totalpages)+'页)【'+selstr+'】(按此选择)')
u = sys.argv[0]+"?mode=4&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus(cat)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&paytype="+urllib.quote_plus(paytype)+"&page="+urllib.quote_plus(listpage)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
for i in range(0,len(match)):
p_name = re.compile('alt="(.+?)"').findall(match[i])[0]
p_thumb = re.compile('src\s*=\s*"(.+?)"').findall(match[i])[0]
#p_id = re.compile('data-qidanadd-albumid="(\d+)"').search(match[i]).group(1)
p_id = re.compile('href="([^"]*)"').search(match[i]).group(1)
try:
p_episode = re.compile('data-qidanadd-episode="(\d)"').search(match[i]).group(1) == '1'
except:
p_episode = False
match1 = re.compile('<span class="icon-vInfo">([^<]+)</span>').search(match[i])
if match1:
msg = match1.group(1).strip()
p_name1 = p_name + '(' + msg + ')'
if (msg.find('更新至') == 0) or (msg.find('共') == 0):
p_episode = True
else:
p_name1 = p_name
if p_episode:
mode = 2
isdir = True
p_id = re.compile('data-qidanadd-albumid="(\d+)"').search(match[i]).group(1)
else:
mode = 3
isdir = False
match1 = re.compile('<p class="dafen2">\s*<strong class="fRed"><span>(\d*)</span>([\.\d]*)</strong><span>分</span>\s*</p>').search(match[i])
if match1:
p_rating = float(match1.group(1)+match1.group(2))
else:
p_rating = 0
match1 = re.compile('<span>导演:</span>(.+?)</p>', re.DOTALL).search(match[i])
if match1:
p_director = ' / '.join(re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1)))
else:
p_director = ''
match1 = re.compile('<em>主演:</em>(.+?)</div>', re.DOTALL).search(match[i])
if match1:
p_cast = re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1))
else:
p_cast = []
match1 = re.compile('<span>类型:</span>(.+?)</p>', re.DOTALL).search(match[i])
if match1:
p_genre = ' / '.join(re.compile('<a [^>]+>([^<]*)</a>').findall(match1.group(1)))
else:
p_genre = ''
match1 = re.compile('<p class="s1">\s*<span>([^<]*)</span>\s*</p>').search(match[i])
if match1:
p_plot = match1.group(1)
else:
p_plot = ''
li = xbmcgui.ListItem(str(i + 1) + '.' + p_name1, iconImage = '', thumbnailImage = p_thumb)
li.setArt({ 'poster': p_thumb })
u = sys.argv[0]+"?mode="+str(mode)+"&name="+urllib.quote_plus(p_name)+"&id="+urllib.quote_plus(p_id)+"&thumb="+urllib.quote_plus(p_thumb)
li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Genre":p_genre, "Plot":p_plot, "Cast":p_cast, "Rating":p_rating})
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, isdir, totalItems)
if currpage > 1:
li = xbmcgui.ListItem('上一页')
u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus(cat)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&page="+urllib.quote_plus(str(currpage-1))+"&paytype="+paytype
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
if currpage < totalpages:
li = xbmcgui.ListItem('下一页')
u = sys.argv[0]+"?mode=1&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&cat="+urllib.quote_plus(cat)+"&area="+urllib.quote_plus(area)+"&year="+urllib.quote_plus(year)+"&order="+order+"&page="+urllib.quote_plus(str(currpage+1))+"&paytype="+paytype
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def seriesList(name,id,thumb,page):
url = 'http://cache.video.qiyi.com/a/%s' % (id)
link = GetHttpData(url)
data = link[link.find('=')+1:]
json_response = simplejson.loads(data)
if json_response['data']['tvYear']:
p_year = int(json_response['data']['tvYear'])
else:
p_year = 0
p_director = ' / '.join(json_response['data']['directors']).encode('utf-8')
p_cast = [x.encode('utf-8') for x in json_response['data']['mainActors']]
p_plot = json_response['data']['tvDesc'].encode('utf-8')
albumType = json_response['data']['albumType']
sourceId = json_response['data']['sourceId']
    if albumType in (1, 6, 9, 12, 13) and sourceId != 0:
url = 'http://cache.video.qiyi.com/jp/sdvlst/%d/%d/?categoryId=%d&sourceId=%d' % (albumType, sourceId, albumType, sourceId)
link = GetHttpData(url)
data = link[link.find('=')+1:]
json_response = simplejson.loads(data)
totalItems = len(json_response['data'])
for item in json_response['data']:
tvId = str(item['tvId'])
videoId = item['vid'].encode('utf-8')
p_id = '%s,%s' % (tvId, videoId)
p_thumb = item['aPicUrl'].encode('utf-8')
p_name = item['videoName'].encode('utf-8')
p_name = '%s %s' % (p_name, item['tvYear'].encode('utf-8'))
li = xbmcgui.ListItem(p_name, iconImage = '', thumbnailImage = p_thumb)
li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Cast":p_cast, "Plot":p_plot, "Year":p_year})
u = sys.argv[0] + "?mode=3&name=" + urllib.quote_plus(p_name) + "&id=" + urllib.quote_plus(p_id)+ "&thumb=" + urllib.quote_plus(p_thumb)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
else:
url = 'http://cache.video.qiyi.com/avlist/%s/%s/' % (id, page)
link = GetHttpData(url)
data = link[link.find('=')+1:]
json_response = simplejson.loads(data)
totalItems = len(json_response['data']['vlist']) + 1
totalpages = json_response['data']['pgt']
currpage = int(page)
if currpage > 1: totalItems = totalItems + 1
if currpage < totalpages: totalItems = totalItems + 1
li = xbmcgui.ListItem(name+'(第'+str(currpage)+'/'+str(totalpages)+'页)')
u = sys.argv[0]+"?mode=99"
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
for item in json_response['data']['vlist']:
tvId = str(item['id'])
videoId = item['vid'].encode('utf-8')
p_id = '%s,%s' % (tvId, videoId)
p_thumb = item['vpic'].encode('utf-8')
p_name = item['vn'].encode('utf-8')
if item['vt']:
p_name = '%s %s' % (p_name, item['vt'].encode('utf-8'))
li = xbmcgui.ListItem(p_name, iconImage = '', thumbnailImage = p_thumb)
li.setArt({ 'poster': thumb })
li.setInfo(type = "Video", infoLabels = {"Title":p_name, "Director":p_director, "Cast":p_cast, "Plot":p_plot, "Year":p_year})
u = sys.argv[0] + "?mode=3&name=" + urllib.quote_plus(p_name) + "&id=" + urllib.quote_plus(p_id)+ "&thumb=" + urllib.quote_plus(p_thumb)
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, False, totalItems)
if currpage > 1:
li = xbmcgui.ListItem('上一页')
u = sys.argv[0]+"?mode=2&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&thumb="+urllib.quote_plus(thumb)+"&page="+urllib.quote_plus(str(currpage-1))
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
if currpage < totalpages:
li = xbmcgui.ListItem('下一页')
u = sys.argv[0]+"?mode=2&name="+urllib.quote_plus(name)+"&id="+urllib.quote_plus(id)+"&thumb="+urllib.quote_plus(thumb)+"&page="+urllib.quote_plus(str(currpage+1))
xbmcplugin.addDirectoryItem(int(sys.argv[1]), u, li, True, totalItems)
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
def selResolution(items):
# stream_types = [
# {'id': '4k', 'container': 'm3u8', 'video_profile': '4k'},
# {'id': 'BD', 'container': 'm3u8', 'video_profile': '1080p'},
# {'id': | |
import logging
import multiprocessing
from contextlib import closing
import scipy.sparse as ssp
import random
import pandas as pd
from math import ceil
from tqdm import tqdm
import contextlib
batcher_instance = None
log = logging.getLogger(__name__)
class Batcher(object):
"""Scheduler to handle parallel jobs on minibatches
Parameters
----------
procs: int
Number of process(es)/thread(s) for executing task in parallel. Used for multiprocessing, threading and Loky
minibatch_size: int
Expected size of each minibatch
backend: {'serial', 'multiprocessing', 'threading', 'loky', 'spark', 'dask', 'ray'}
Backend for computing the tasks
- 'serial' sequential execution without a backend scheduler
- 'multiprocessing' Python standard multiprocessing library
- 'threading' Python standard threading library
- 'loky' Loky fork of multiprocessing library
- 'joblib' Joblib fork of multiprocessing library
- 'spark' PySpark local or distributed execution
- 'dask' Dask Distributed local or distributed execution
- 'ray' Ray local or distributed execution
task_num_cpus: int
Number of CPUs to reserve per minibatch task for Ray
task_num_gpus: int
Number of GPUs to reserve per minibatch task for Ray
backend_handle: object
Backend handle for sending tasks
verbose: int
Verbosity level.
Setting verbose > 0 will display additional information depending on the specific level set.
"""
def __init__(
self,
procs=0,
minibatch_size=20000,
backend_handle=None,
backend="multiprocessing",
task_num_cpus=1,
task_num_gpus=0,
verbose=0,
):
if procs == 0 or procs is None:
procs = multiprocessing.cpu_count()
self.procs = procs
self.verbose = verbose
self.minibatch_size = minibatch_size
self.backend_handle = backend_handle
self.backend = backend
self.task_num_cpus = task_num_cpus
self.task_num_gpus = task_num_gpus
def list2indexedrdd(self, lst, minibatch_size=0):
if minibatch_size == 0:
minibatch_size = self.minibatch_size
start = 0
len_data = len(lst)
batch_count = 0
batches = []
while start < len_data:
batches.append([batch_count] + [lst[start : start + minibatch_size]])
start += minibatch_size
batch_count += 1
return self.backend_handle.parallelize(batches)
def indexedrdd2list(self, indexedrdd, sort=True):
batches = indexedrdd.collect()
if sort:
batches = sorted(batches)
return [batch[1] for batch in batches]
def split_batches(self, data, minibatch_size=None, backend=None):
"""Split data into minibatches with a specified size
Parameters
----------
data: iterable and indexable
            List-like data to be split into batches. Includes scipy matrices and Pandas DataFrames.
minibatch_size: int
Expected sizes of minibatches split from the data.
backend: object
Backend to use, instead of the Batcher backend attribute
Returns
-------
data_split: list
List of minibatches, each entry is a list-like object representing the data subset in a batch.
"""
if minibatch_size is None:
minibatch_size = self.minibatch_size
if backend is None:
backend = self.backend
if isinstance(data, list) or isinstance(data, tuple) or isinstance(data, dict):
len_data = len(data)
else:
len_data = data.shape[0]
if backend == "spark":
return self.list2indexedrdd(data, minibatch_size)
if isinstance(data, pd.DataFrame):
data = [
data.iloc[x * minibatch_size : (x + 1) * minibatch_size]
for x in range(int(ceil(len_data / minibatch_size)))
]
elif isinstance(data, dict):
data = [
dict(
list(data.items())[
x * minibatch_size : min(len_data, (x + 1) * minibatch_size)
]
)
for x in range(int(ceil(len_data / minibatch_size)))
]
else:
data = [
data[x * minibatch_size : min(len_data, (x + 1) * minibatch_size)]
for x in range(int(ceil(len_data / minibatch_size)))
]
# if backend=="dask": return self.backend_handle.scatter(data)
return data
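    # Illustrative sketch (assuming a small serial Batcher):
    #   b = Batcher(minibatch_size=3, backend="serial")
    #   b.split_batches(list(range(7)))   # -> [[0, 1, 2], [3, 4, 5], [6]]
    # DataFrames are split with .iloc and dicts by slicing their item list.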
def collect_batches(self, data, backend=None, sort=True):
if backend is None:
backend = self.backend
if backend == "spark":
data = self.indexedrdd2list(data, sort)
if backend == "dask":
data = self.backend_handle.gather(data)
return data
def merge_batches(self, data):
"""Merge a list of data minibatches into one single instance representing the data
Parameters
----------
data: list
List of minibatches to merge
Returns
-------
(anonymous): sparse matrix | pd.DataFrame | list
Single complete list-like data merged from given batches
"""
if isinstance(data[0], ssp.csr_matrix):
return ssp.vstack(data)
if isinstance(data[0], pd.DataFrame) or isinstance(data[0], pd.Series):
return pd.concat(data)
return [item for sublist in data for item in sublist]
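    # Illustrative sketch: merge_batches is the inverse of split_batches for lists,
    # DataFrames/Series and scipy CSR matrices.
    #   b.merge_batches([[0, 1, 2], [3, 4, 5], [6]])   # -> [0, 1, 2, 3, 4, 5, 6]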
def process_batches(
self,
task,
data,
args,
backend=None,
backend_handle=None,
input_split=False,
merge_output=True,
minibatch_size=None,
procs=None,
task_num_cpus=None,
task_num_gpus=None,
verbose=None,
description="batch_apply",
):
"""
Parameters
----------
task: function
Function to apply on each minibatch with other specified arguments
data: list-like
Samples to split into minibatches and apply the specified function on
args: list
Arguments to pass to the specified function following the mini-batch
input_split: boolean, default False
If True, input data is already mapped into minibatches, otherwise data will be split on call.
merge_output: boolean, default True
If True, results from minibatches will be reduced into one single instance before return.
procs: int
Number of process(es)/thread(s) for executing task in parallel. Used for multiprocessing, threading,
Loky and Ray
minibatch_size: int
Expected size of each minibatch
backend: {'serial', 'multiprocessing', 'threading', 'loky', 'spark', 'dask', 'ray'}
Backend for computing the tasks
- 'serial' sequential execution without a backend scheduler
- 'multiprocessing' Python standard multiprocessing library
- 'threading' Python standard threading library
- 'loky' Loky fork of multiprocessing library
- 'joblib' Joblib fork of multiprocessing library
- 'spark' PySpark local or distributed execution
- 'dask' Dask Distributed local or distributed execution
- 'ray' Ray local or distributed execution
backend_handle: object
Backend handle for sending tasks
task_num_cpus: int
Number of CPUs to reserve per minibatch task for Ray
task_num_gpus: int
Number of GPUs to reserve per minibatch task for Ray
verbose: int
Verbosity level.
Setting verbose > 0 will display additional information depending on the specific level set.
Returns
-------
results: list-like | list of list-like
If merge_output is specified as True, this will be a list-like object representing
the dataset, with each entry as a sample. Otherwise this will be a list of list-like
objects, with each entry representing the results from a minibatch.
"""
if procs is None:
procs = self.procs
if backend is None:
backend = self.backend
if backend_handle is None:
backend_handle = self.backend_handle
if task_num_cpus is None:
task_num_cpus = self.task_num_cpus
if task_num_gpus is None:
task_num_gpus = self.task_num_gpus
if verbose is None:
verbose = self.verbose
if verbose > 1:
log.info(
"%s %s %s %s %s %s %s %s %s %s %s %s %s %s"
% (
" backend:",
backend,
" minibatch_size:",
self.minibatch_size,
" procs:",
procs,
" input_split:",
input_split,
" merge_output:",
merge_output,
" len(data):",
len(data),
"len(args):",
len(args),
)
)
if verbose > 10:
log.info(
"%s %s %s %s %s %s %s %s"
% (
" len(data):",
len(data),
"len(args):",
len(args),
"[type(x) for x in data]:",
[type(x) for x in data],
"[type(x) for x in args]:",
[type(x) for x in args],
)
)
if not (input_split):
if backend == "spark":
paral_params = self.split_batches(data, minibatch_size, backend="spark")
else:
paral_params = [
[data_batch] + args
for data_batch in self.split_batches(data, minibatch_size)
]
else:
if backend != "spark":
paral_params = [[data_batch] + args for data_batch in data]
else:
paral_params = data
if verbose > 10:
print(" Start task, len(paral_params)", len(paral_params))
if backend == "serial":
results = [
task(minibatch) for minibatch in tqdm(paral_params, desc=description)
]
else:
if backend == "multiprocessing":
with closing(
multiprocessing.Pool(max(1, procs), maxtasksperchild=2)
) as pool:
results = pool.map_async(task, paral_params)
pool.close()
pool.join()
results = results.get()
elif backend == "threading":
with closing(multiprocessing.dummy.Pool(max(1, procs))) as pool:
results = pool.map(task, paral_params)
pool.close()
pool.join()
elif backend == "loky":
from loky import get_reusable_executor
pool = get_reusable_executor(max_workers=max(1, procs))
results = list(pool.map(task, tqdm(paral_params, desc=description)))
elif backend == "joblib":
from joblib import Parallel, delayed
with tqdm_joblib(
tqdm(desc=description, total=len(paral_params))
) as pbar:
results = Parallel(n_jobs=procs)(
delayed(task)(params) for params in paral_params
)
elif backend == "p_tqdm":
from p_tqdm import p_map
results = p_map(task, paral_params, num_cpus=procs)
elif backend == "dask":
# if not (input_split): data= self.scatter(data)
results = [
self.backend_handle.submit(task, params)
for params in tqdm(paral_params, desc=description)
]
elif backend == "spark":
def apply_func_to_indexedrdd(batch):
return [batch[0]] + [task([batch[1]] + args)]
results = paral_params.map(apply_func_to_indexedrdd)
elif backend == "ray":
@self.backend_handle.remote(
num_cpus=task_num_cpus, num_gpus=task_num_gpus
)
def f_ray(f, data):
return f(data)
pbar = tqdm(desc=description, total=len(paral_params) + 1)
results = [
f_ray.remote(task, paral_params.pop(0))
for _ in range(min(len(paral_params), self.procs))
]
uncompleted = results
pbar.update(len(results))
while len(paral_params) > 0:
# More tasks than available processors. Queue the task calls
done, remaining = self.backend_handle.wait(
uncompleted, timeout=60, fetch_local=False
)
# if verbose > 5: print("Done, len(done), len(remaining)", len(done), len(remaining))
if len(done) == 0:
continue
done = done[0]
uncompleted = [x for x in uncompleted if x != done]
if len(remaining) > 0:
new = f_ray.remote(task, paral_params.pop(0))
pbar.update(1)
uncompleted.append(new)
results.append(new)
results = [self.backend_handle.get(x) for x in results]
pbar.update(1)
pbar.close()
# ppft currently not supported. Supporting arbitrary | |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import sys
import warnings
from abc import ABC, abstractmethod
from contextlib import contextmanager
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from ..file_utils import add_end_docstrings, is_tf_available, is_torch_available
from ..modelcard import ModelCard
from ..tokenization_utils import PreTrainedTokenizer, TruncationStrategy
from ..utils import logging
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TFAutoModel
if is_torch_available():
import torch
from ..models.auto.modeling_auto import AutoModel
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def infer_framework_from_model(model, model_classes: Optional[Dict[str, type]] = None, revision: Optional[str] = None):
"""
Select framework (TensorFlow or PyTorch) to use from the :obj:`model` passed. Returns a tuple (framework, model).
If :obj:`model` is instantiated, this function will just infer the framework from the model class. Otherwise
:obj:`model` is actually a checkpoint name and this method will try to instantiate it using :obj:`model_classes`.
Since we don't want to instantiate the model twice, this model is returned for use by the pipeline.
If both frameworks are installed and available for :obj:`model`, PyTorch is selected.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model to infer the framework from. If :obj:`str`, a checkpoint name.
model_classes (dictionary :obj:`str` to :obj:`type`, `optional`):
A mapping framework to class.
revision (:obj:`str`, `optional`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
Returns:
:obj:`Tuple`: A tuple framework, model.
"""
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model_class = model_classes.get("pt", AutoModel)
model = model_class.from_pretrained(model, revision=revision)
elif is_tf_available() and not is_torch_available():
model_class = model_classes.get("tf", TFAutoModel)
model = model_class.from_pretrained(model, revision=revision)
else:
try:
model_class = model_classes.get("pt", AutoModel)
model = model_class.from_pretrained(model, revision=revision)
except OSError:
model_class = model_classes.get("tf", TFAutoModel)
model = model_class.from_pretrained(model, revision=revision)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework, model
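# Illustrative usage sketch: when `model` is an already-instantiated PreTrainedModel or
# TFPreTrainedModel, the framework is read off the class name prefix (my_model is assumed
# to be such an instance):
#   framework, model = infer_framework_from_model(my_model)
#   # framework == "tf" if type(my_model).__name__ starts with "TF", else "pt"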
def get_framework(model, revision: Optional[str] = None):
"""
Select framework (TensorFlow or PyTorch) to use.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
the model name). If no specific model is provided, defaults to using PyTorch.
"""
warnings.warn(
"`get_framework` is deprecated and will be removed in v5, use `infer_framework_from_model` instead.",
FutureWarning,
)
if not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
if isinstance(model, str):
if is_torch_available() and not is_tf_available():
model = AutoModel.from_pretrained(model, revision=revision)
elif is_tf_available() and not is_torch_available():
model = TFAutoModel.from_pretrained(model, revision=revision)
else:
try:
model = AutoModel.from_pretrained(model, revision=revision)
except OSError:
model = TFAutoModel.from_pretrained(model, revision=revision)
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
return framework
def get_default_model(targeted_task: Dict, framework: Optional[str], task_options: Optional[Any]) -> str:
"""
Select a default model to use for a given task. Defaults to pytorch if ambiguous.
Args:
targeted_task (:obj:`Dict` ):
Dictionary representing the given task, that should contain default models
framework (:obj:`str`, None)
"pt", "tf" or None, representing a specific framework if it was specified, or None if we don't know yet.
task_options (:obj:`Any`, None)
Any further value required by the task to get fully specified, for instance (SRC, TGT) languages for
translation task.
Returns
:obj:`str` The model string representing the default model for this pipeline
"""
if is_torch_available() and not is_tf_available():
framework = "pt"
elif is_tf_available() and not is_torch_available():
framework = "tf"
defaults = targeted_task["default"]
if task_options:
if task_options not in defaults:
raise ValueError("The task does not provide any default models for options {}".format(task_options))
default_models = defaults[task_options]["model"]
elif "model" in defaults:
default_models = targeted_task["default"]["model"]
else:
# XXX This error message needs to be updated to be more generic if more tasks are going to become
# parametrized
raise ValueError('The task defaults can\'t be correctly selected. You probably meant "translation_XX_to_YY"')
if framework is None:
framework = "pt"
return default_models[framework]
class PipelineException(Exception):
"""
Raised by a :class:`~transformers.Pipeline` when handling __call__.
Args:
task (:obj:`str`): The task of the pipeline.
model (:obj:`str`): The model used by the pipeline.
reason (:obj:`str`): The error message to display.
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing. Supported data formats
    currently include:
- JSON
- CSV
- stdin/stdout (pipe)
:obj:`PipelineDataFormat` also includes some utilities to work with multi-columns like mapping from datasets
columns to pipelines keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite: bool = False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError("{} doesnt exist on disk".format(self.input_path))
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: Union[dict, List[dict]]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
Returns:
:obj:`str`: Path where the data has been saved.
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
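    # Illustrative sketch: with output_path="out.json", save_binary pickles the data next
    # to it as "out.pickle" and returns that path. `fmt` is assumed to be an instance of a
    # concrete PipelineDataFormat subclass:
    #   path = fmt.save_binary([{"label": "POSITIVE", "score": 0.99}])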
@staticmethod
def from_str(
format: str,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
) -> "PipelineDataFormat":
"""
Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending on
:obj:`format`.
Args:
format: (:obj:`str`):
The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
output_path (:obj:`str`, `optional`):
Where to save the outgoing data.
input_path (:obj:`str`, `optional`):
Where to look for the input data.
column (:obj:`str`, `optional`):
The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
Returns:
:class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
"""
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
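    # Illustrative sketch of the factory (file names are made up):
    #   fmt = PipelineDataFormat.from_str("csv", output_path="out.csv",
    #                                     input_path="in.csv", column="text")
    #   for sample in fmt: pass  # run the pipeline on each sample and fmt.save(...) the results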
class CsvPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using CSV data format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
| |
    unconditional wait (see p274 of Tijms)
equivalent_uncond_prob = 1.0 - (1.0 - prob) * erlangc(load, c)
# Compute conditional wait time percentile for M/M/c system to use in approximation
condwaitq_pctile_mmc = mmc_qwait_pctile(equivalent_uncond_prob, arr_rate, svc_rate, c)
# First order approximation for conditional wait time in queue
condwaitq_pctile = 0.5 * (1.0 + cv2_svc_time) * condwaitq_pctile_mmc
return condwaitq_pctile
def mgc_qcondwait_pctile_secondorder_2moment(prob, arr_rate, svc_rate, c, cv2_svc_time):
"""
Return an approximate conditional queue wait percentile in M/G/c/inf system.
The approximation is based on a second order approximation using the M/M/c delay percentile.
See Tijms, H.C. (1994), "Stochastic Models: An Algorithmic Approach", <NAME> and Sons, Chichester
Chapter 4, p299-300
The percentile is conditional on Wq>0 (i.e. on event customer waits)
This approximation is based on interpolation between corresponding M/M/c and M/D/c systems.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
c : int
number of servers
cv2_svc_time : float
squared coefficient of variation for service time distribution
Returns
-------
float
t such that P(wait time in queue is < t | wait time in queue is > 0) = prob
"""
load = arr_rate / svc_rate
    # Compute corresponding prob for unconditional wait (see p274 of Tijms)
equivalent_uncond_prob = 1.0 - (1.0 - prob) * erlangc(load, c)
# Compute conditional wait time percentile for M/M/c system to use in approximation
condwaitq_pctile_mmc = mmc_qwait_pctile(equivalent_uncond_prob, arr_rate, svc_rate, c)
# Compute conditional wait time percentile for M/D/c system to use in approximation
# TODO: implement mdc_qwait_pctile
condqwait_pctile_mdc = mdc_waitq_pctile(equivalent_uncond_prob, arr_rate, svc_rate, c)
# Second order approximation for conditional wait time in queue
condwaitq_pctile = (1.0 - cv2_svc_time) * condqwait_pctile_mdc + cv2_svc_time * condwaitq_pctile_mmc
return condwaitq_pctile
def mg1_mean_qsize(arr_rate, svc_rate, cv2_svc_time):
"""
Return the mean queue size in M/G/1/inf queue using P-K formula.
See any decent queueing book.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
cv2_svc_time : float
squared coefficient of variation for service time distribution
Returns
-------
float
mean number of customers in queue
"""
rho = arr_rate / svc_rate
    # P-K formula for mean queue length: Lq = rho^2 * (1 + cv^2) / (2 * (1 - rho))
    mean_qsize = rho ** 2 * (1.0 + cv2_svc_time) / (2 * (1.0 - rho))
return mean_qsize
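# Illustrative worked example of the Pollaczek-Khinchine mean queue length (numbers made up):
# with rho = 0.8 and cv2_svc_time = 1.0 (exponential service),
# Lq = rho**2 * (1 + cv2) / (2 * (1 - rho)) = 0.64 * 2 / 0.4 = 3.2,
# which matches the M/M/1 value rho**2 / (1 - rho).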
def mg1_mean_qwait(arr_rate, svc_rate, cs2):
"""
Return the mean queue wait in M/G/1/inf queue using P-K formula along with Little's Law.
See any decent queueing book.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
mean wait time in queue
"""
mean_qsize = mg1_mean_qsize(arr_rate, svc_rate, cs2)
mean_qwait = mean_qsize / arr_rate
return mean_qwait
def gamma_0(m, rho):
"""
See p124 immediately after Eq 2.16.
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
term1 = 0.24
term2 = (1 - rho) * (m - 1) * (math.sqrt(4 + 5 * m) - 2 ) / (16 * m * rho)
return min(term1, term2)
def _ggm_mean_qwait_whitt_phi_1(m, rho):
"""
See p124 immediately after Eq 2.16.
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
return 1.0 + gamma_0(m, rho)
def _ggm_mean_qwait_whitt_phi_2(m, rho):
"""
See p124 immediately after Eq 2.18.
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
return 1.0 - 4.0 * gamma_0(m, rho)
def _ggm_mean_qwait_whitt_phi_3(m, rho):
"""
See p124 immediately after Eq 2.20.
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
term1 = _ggm_mean_qwait_whitt_phi_2(m, rho)
term2 = math.exp(-2.0 * (1 - rho) / (3.0 * rho))
return term1 * term2
def _ggm_mean_qwait_whitt_phi_4(m, rho):
"""
See p125 , Eq 2.21.
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
term1 = 1.0
term2 = 0.5 * (_ggm_mean_qwait_whitt_phi_1(m, rho) + _ggm_mean_qwait_whitt_phi_3(m, rho))
return min(term1, term2)
def _ggm_mean_qwait_whitt_psi_0(c2, m, rho):
"""
See p125 , Eq 2.22.
:param c2: float
common squared CV for both arrival and service process
:param m: int
number of servers
:param rho: float
lambda / (mu * m)
:return: float
"""
if c2 >= 1:
return 1.0
else:
return _ggm_mean_qwait_whitt_phi_4(m, rho) ** (2 * (1 - c2))
def _ggm_mean_qwait_whitt_phi_0(rho, ca2, cs2, m):
"""
See p125 , Eq 2.25.
:param rho: float
lambda / (mu * m)
:param ca2: float
squared CV for arrival process
:param cs2: float
squared CV for service process
:param m: int
number of servers
:return: float
"""
if ca2 >= cs2:
term1 = _ggm_mean_qwait_whitt_phi_1(m, rho) * (4 * (ca2 - cs2) / (4 * ca2 - 3 * cs2))
term2 = (cs2 / (4 * ca2 - 3 * cs2)) * _ggm_mean_qwait_whitt_psi_0((ca2 + cs2) / 2.0, m, rho)
return term1 + term2
else:
term1 = _ggm_mean_qwait_whitt_phi_3(m, rho) * ((cs2 - ca2) / (2 * ca2 + 2 * cs2))
term2 = ( (cs2 + 3 * ca2) / (2 * ca2 + 2 * cs2) )
term3 = _ggm_mean_qwait_whitt_psi_0((ca2 + cs2) / 2.0, m, rho)
check = term2 * term3 / term1
#print (check)
return term1 + term2 * term3
def ggm_mean_qwait_whitt(arr_rate, svc_rate, m, ca2, cs2):
"""
Return the approximate mean queue wait in GI/G/c/inf queue using Whitt's 1993 approximation.
See <NAME>. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
It's based on interpolations with corrections between an M/D/c, D/M/c and a M/M/c queueing systems.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
c : int
number of servers
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
mean wait time in queue
"""
rho = arr_rate / (svc_rate * float(m))
if rho >= 1.0:
raise ValueError("rho must be less than 1.0")
# Now implement Eq 2.24 on p 125
# Hack - for some reason I can't get this approximation to match Table 2 in the above
# reference for the case of D/M/m. However, if I use Eq 2.20 (specific for the D/M/m case),
# I do match the expected results. So, for now, I'll trap for this case.
if ca2 == 0 and cs2 == 1:
qwait = dmm_mean_qwait_whitt(arr_rate, svc_rate, m)
else:
term1 = _ggm_mean_qwait_whitt_phi_0(rho, ca2, cs2, m)
term2 = 0.5 * (ca2 + cs2)
term3 = mmc_mean_qwait(arr_rate, svc_rate, m)
qwait = term1 * term2 * term3
return qwait
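# Illustrative usage sketch (parameter values are made up):
#   Wq = ggm_mean_qwait_whitt(arr_rate=3.0, svc_rate=1.0, m=4, ca2=1.0, cs2=1.0)
# With ca2 = cs2 = 1 the correction terms collapse to 1, so the result reduces to the
# M/M/c mean queue wait returned by mmc_mean_qwait.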
def ggm_prob_wait_whitt(arr_rate, svc_rate, m, ca2, cs2):
"""
Return the approximate P(Wq > 0) in GI/G/c/inf queue using Whitt's 1993 approximation.
See <NAME>. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
It's based on interpolations with corrections between an M/D/c, D/M/c and a M/M/c queueing systems.
Parameters
----------
arr_rate : float
average arrival rate to queueing system
svc_rate : float
average service rate (each server). 1/svc_rate is mean service time.
c : int
number of servers
ca2 : float
squared coefficient of variation for inter-arrival time distribution
cs2 : float
squared coefficient of variation for service time distribution
Returns
-------
float
mean wait time in queue
"""
rho = arr_rate / (svc_rate * float(m))
# For ca2 = 1 (e.g. Poisson arrivals), Whitt uses fact that Erlang-C works well for M/G/c
if ca2 == 1:
pwait = mgc_prob_wait_erlangc(arr_rate, svc_rate, m)
else:
pi = _ggm_prob_wait_whitt_pi(m, rho, ca2, cs2)
pwait = min(pi, 1)
return pwait
def _ggm_prob_wait_whitt_z(ca2, cs2):
"""
Equation 3.8 on p139 of Whitt (1993). Used in approximation for P(Wq > 0) in GI/G/c/inf queue.
See Whitt, Ward. "Approximations for the GI/G/m queue"
Production and Operations Management 2, 2 (Spring 1993): 114-161.
Parameters
----------
ca2 : float
squared coefficient of | |
self: TNGraph const *
"""
return _snap.TNGraph_EndEI(self)
def GetEI(self, *args):
"""
GetEI(TNGraph self, int const & SrcNId, int const & DstNId) -> TNGraph::TEdgeI
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNGraph_GetEI(self, *args)
def GetRndNId(self, *args):
"""
GetRndNId(TNGraph self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndNId(TNGraph self) -> int
Parameters:
self: TNGraph *
"""
return _snap.TNGraph_GetRndNId(self, *args)
def GetRndNI(self, *args):
"""
GetRndNI(TNGraph self, TRnd Rnd=Rnd) -> TNGraph::TNodeI
Parameters:
Rnd: TRnd &
GetRndNI(TNGraph self) -> TNGraph::TNodeI
Parameters:
self: TNGraph *
"""
return _snap.TNGraph_GetRndNI(self, *args)
def GetNIdV(self, *args):
"""
GetNIdV(TNGraph self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TNGraph_GetNIdV(self, *args)
def Empty(self):
"""
Empty(TNGraph self) -> bool
Parameters:
self: TNGraph const *
"""
return _snap.TNGraph_Empty(self)
def Clr(self):
"""
Clr(TNGraph self)
Parameters:
self: TNGraph *
"""
return _snap.TNGraph_Clr(self)
def Reserve(self, *args):
"""
Reserve(TNGraph self, int const & Nodes, int const & Edges)
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TNGraph_Reserve(self, *args)
def ReserveNIdInDeg(self, *args):
"""
ReserveNIdInDeg(TNGraph self, int const & NId, int const & InDeg)
Parameters:
NId: int const &
InDeg: int const &
"""
return _snap.TNGraph_ReserveNIdInDeg(self, *args)
def ReserveNIdOutDeg(self, *args):
"""
ReserveNIdOutDeg(TNGraph self, int const & NId, int const & OutDeg)
Parameters:
NId: int const &
OutDeg: int const &
"""
return _snap.TNGraph_ReserveNIdOutDeg(self, *args)
def Defrag(self, OnlyNodeLinks=False):
"""
Defrag(TNGraph self, bool const & OnlyNodeLinks=False)
Parameters:
OnlyNodeLinks: bool const &
Defrag(TNGraph self)
Parameters:
self: TNGraph *
"""
return _snap.TNGraph_Defrag(self, OnlyNodeLinks)
def IsOk(self, ThrowExcept=True):
"""
IsOk(TNGraph self, bool const & ThrowExcept=True) -> bool
Parameters:
ThrowExcept: bool const &
IsOk(TNGraph self) -> bool
Parameters:
self: TNGraph const *
"""
return _snap.TNGraph_IsOk(self, ThrowExcept)
def Dump(self, *args):
"""
Dump(TNGraph self, FILE * OutF=stdout)
Parameters:
OutF: FILE *
Dump(TNGraph self)
Parameters:
self: TNGraph const *
"""
return _snap.TNGraph_Dump(self, *args)
def GetSmallGraph():
"""GetSmallGraph() -> PNGraph"""
return _snap.TNGraph_GetSmallGraph()
GetSmallGraph = staticmethod(GetSmallGraph)
__swig_destroy__ = _snap.delete_TNGraph
TNGraph.Save = new_instancemethod(_snap.TNGraph_Save,None,TNGraph)
TNGraph.HasFlag = new_instancemethod(_snap.TNGraph_HasFlag,None,TNGraph)
TNGraph.GetNodes = new_instancemethod(_snap.TNGraph_GetNodes,None,TNGraph)
TNGraph.AddNode = new_instancemethod(_snap.TNGraph_AddNode,None,TNGraph)
TNGraph.DelNode = new_instancemethod(_snap.TNGraph_DelNode,None,TNGraph)
TNGraph.IsNode = new_instancemethod(_snap.TNGraph_IsNode,None,TNGraph)
TNGraph.BegNI = new_instancemethod(_snap.TNGraph_BegNI,None,TNGraph)
TNGraph.EndNI = new_instancemethod(_snap.TNGraph_EndNI,None,TNGraph)
TNGraph.GetNI = new_instancemethod(_snap.TNGraph_GetNI,None,TNGraph)
TNGraph.GetMxNId = new_instancemethod(_snap.TNGraph_GetMxNId,None,TNGraph)
TNGraph.GetEdges = new_instancemethod(_snap.TNGraph_GetEdges,None,TNGraph)
TNGraph.AddEdge = new_instancemethod(_snap.TNGraph_AddEdge,None,TNGraph)
TNGraph.DelEdge = new_instancemethod(_snap.TNGraph_DelEdge,None,TNGraph)
TNGraph.IsEdge = new_instancemethod(_snap.TNGraph_IsEdge,None,TNGraph)
TNGraph.BegEI = new_instancemethod(_snap.TNGraph_BegEI,None,TNGraph)
TNGraph.EndEI = new_instancemethod(_snap.TNGraph_EndEI,None,TNGraph)
TNGraph.GetEI = new_instancemethod(_snap.TNGraph_GetEI,None,TNGraph)
TNGraph.GetRndNId = new_instancemethod(_snap.TNGraph_GetRndNId,None,TNGraph)
TNGraph.GetRndNI = new_instancemethod(_snap.TNGraph_GetRndNI,None,TNGraph)
TNGraph.GetNIdV = new_instancemethod(_snap.TNGraph_GetNIdV,None,TNGraph)
TNGraph.Empty = new_instancemethod(_snap.TNGraph_Empty,None,TNGraph)
TNGraph.Clr = new_instancemethod(_snap.TNGraph_Clr,None,TNGraph)
TNGraph.Reserve = new_instancemethod(_snap.TNGraph_Reserve,None,TNGraph)
TNGraph.ReserveNIdInDeg = new_instancemethod(_snap.TNGraph_ReserveNIdInDeg,None,TNGraph)
TNGraph.ReserveNIdOutDeg = new_instancemethod(_snap.TNGraph_ReserveNIdOutDeg,None,TNGraph)
TNGraph.Defrag = new_instancemethod(_snap.TNGraph_Defrag,None,TNGraph)
TNGraph.IsOk = new_instancemethod(_snap.TNGraph_IsOk,None,TNGraph)
TNGraph.Dump = new_instancemethod(_snap.TNGraph_Dump,None,TNGraph)
TNGraph_swigregister = _snap.TNGraph_swigregister
TNGraph_swigregister(TNGraph)
def TNGraph_New(*args):
"""
New() -> PNGraph
TNGraph_New(int const & Nodes, int const & Edges) -> PNGraph
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TNGraph_New(*args)
def TNGraph_Load(*args):
"""
TNGraph_Load(TSIn SIn) -> PNGraph
Parameters:
SIn: TSIn &
"""
return _snap.TNGraph_Load(*args)
def TNGraph_GetSmallGraph():
"""TNGraph_GetSmallGraph() -> PNGraph"""
return _snap.TNGraph_GetSmallGraph()
class TNEGraph(object):
"""Proxy of C++ TNEGraph class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(TNEGraph self) -> TNEGraph
__init__(TNEGraph self, int const & Nodes, int const & Edges) -> TNEGraph
Parameters:
Nodes: int const &
Edges: int const &
__init__(TNEGraph self, TNEGraph Graph) -> TNEGraph
Parameters:
Graph: TNEGraph const &
__init__(TNEGraph self, TSIn SIn) -> TNEGraph
Parameters:
SIn: TSIn &
"""
_snap.TNEGraph_swiginit(self,_snap.new_TNEGraph(*args))
def Save(self, *args):
"""
Save(TNEGraph self, TSOut SOut)
Parameters:
SOut: TSOut &
"""
return _snap.TNEGraph_Save(self, *args)
def New(*args):
"""
New() -> PNEGraph
New(int const & Nodes, int const & Edges) -> PNEGraph
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TNEGraph_New(*args)
New = staticmethod(New)
def Load(*args):
"""
Load(TSIn SIn) -> PNEGraph
Parameters:
SIn: TSIn &
"""
return _snap.TNEGraph_Load(*args)
Load = staticmethod(Load)
def HasFlag(self, *args):
"""
HasFlag(TNEGraph self, TGraphFlag const & Flag) -> bool
Parameters:
Flag: TGraphFlag const &
"""
return _snap.TNEGraph_HasFlag(self, *args)
def GetNodes(self):
"""
GetNodes(TNEGraph self) -> int
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_GetNodes(self)
def AddNode(self, *args):
"""
AddNode(TNEGraph self, int NId=-1) -> int
Parameters:
NId: int
AddNode(TNEGraph self) -> int
AddNode(TNEGraph self, TNEGraph::TNodeI const & NodeId) -> int
Parameters:
NodeId: TNEGraph::TNodeI const &
"""
return _snap.TNEGraph_AddNode(self, *args)
def DelNode(self, *args):
"""
DelNode(TNEGraph self, int const & NId)
Parameters:
NId: int const &
DelNode(TNEGraph self, TNEGraph::TNode const & NodeI)
Parameters:
NodeI: TNEGraph::TNode const &
"""
return _snap.TNEGraph_DelNode(self, *args)
def IsNode(self, *args):
"""
IsNode(TNEGraph self, int const & NId) -> bool
Parameters:
NId: int const &
"""
return _snap.TNEGraph_IsNode(self, *args)
def BegNI(self):
"""
BegNI(TNEGraph self) -> TNEGraph::TNodeI
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_BegNI(self)
def EndNI(self):
"""
EndNI(TNEGraph self) -> TNEGraph::TNodeI
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_EndNI(self)
def GetNI(self, *args):
"""
GetNI(TNEGraph self, int const & NId) -> TNEGraph::TNodeI
Parameters:
NId: int const &
"""
return _snap.TNEGraph_GetNI(self, *args)
def GetMxNId(self):
"""
GetMxNId(TNEGraph self) -> int
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_GetMxNId(self)
def GetEdges(self):
"""
GetEdges(TNEGraph self) -> int
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_GetEdges(self)
def AddEdge(self, *args):
"""
AddEdge(TNEGraph self, int const & SrcNId, int const & DstNId, int EId=-1) -> int
Parameters:
SrcNId: int const &
DstNId: int const &
EId: int
AddEdge(TNEGraph self, int const & SrcNId, int const & DstNId) -> int
Parameters:
SrcNId: int const &
DstNId: int const &
AddEdge(TNEGraph self, TNEGraph::TEdgeI const & EdgeI) -> int
Parameters:
EdgeI: TNEGraph::TEdgeI const &
"""
return _snap.TNEGraph_AddEdge(self, *args)
def DelEdge(self, *args):
"""
DelEdge(TNEGraph self, int const & EId)
Parameters:
EId: int const &
DelEdge(TNEGraph self, int const & SrcNId, int const & DstNId, bool const & IsDir=True)
Parameters:
SrcNId: int const &
DstNId: int const &
IsDir: bool const &
DelEdge(TNEGraph self, int const & SrcNId, int const & DstNId)
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNEGraph_DelEdge(self, *args)
def IsEdge(self, *args):
"""
IsEdge(TNEGraph self, int const & EId) -> bool
Parameters:
EId: int const &
IsEdge(TNEGraph self, int const & SrcNId, int const & DstNId, bool const & IsDir=True) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
IsDir: bool const &
IsEdge(TNEGraph self, int const & SrcNId, int const & DstNId) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
IsEdge(TNEGraph self, int const & SrcNId, int const & DstNId, int & EId, bool const & IsDir=True) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
EId: int &
IsDir: bool const &
IsEdge(TNEGraph self, int const & SrcNId, int const & DstNId, int & EId) -> bool
Parameters:
SrcNId: int const &
DstNId: int const &
EId: int &
"""
return _snap.TNEGraph_IsEdge(self, *args)
def GetEId(self, *args):
"""
GetEId(TNEGraph self, int const & SrcNId, int const & DstNId) -> int
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNEGraph_GetEId(self, *args)
def BegEI(self):
"""
BegEI(TNEGraph self) -> TNEGraph::TEdgeI
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_BegEI(self)
def EndEI(self):
"""
EndEI(TNEGraph self) -> TNEGraph::TEdgeI
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_EndEI(self)
def GetEI(self, *args):
"""
GetEI(TNEGraph self, int const & EId) -> TNEGraph::TEdgeI
Parameters:
EId: int const &
GetEI(TNEGraph self, int const & SrcNId, int const & DstNId) -> TNEGraph::TEdgeI
Parameters:
SrcNId: int const &
DstNId: int const &
"""
return _snap.TNEGraph_GetEI(self, *args)
def GetRndNId(self, *args):
"""
GetRndNId(TNEGraph self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndNId(TNEGraph self) -> int
Parameters:
self: TNEGraph *
"""
return _snap.TNEGraph_GetRndNId(self, *args)
def GetRndNI(self, *args):
"""
GetRndNI(TNEGraph self, TRnd Rnd=Rnd) -> TNEGraph::TNodeI
Parameters:
Rnd: TRnd &
GetRndNI(TNEGraph self) -> TNEGraph::TNodeI
Parameters:
self: TNEGraph *
"""
return _snap.TNEGraph_GetRndNI(self, *args)
def GetRndEId(self, *args):
"""
GetRndEId(TNEGraph self, TRnd Rnd=Rnd) -> int
Parameters:
Rnd: TRnd &
GetRndEId(TNEGraph self) -> int
Parameters:
self: TNEGraph *
"""
return _snap.TNEGraph_GetRndEId(self, *args)
def GetRndEI(self, *args):
"""
GetRndEI(TNEGraph self, TRnd Rnd=Rnd) -> TNEGraph::TEdgeI
Parameters:
Rnd: TRnd &
GetRndEI(TNEGraph self) -> TNEGraph::TEdgeI
Parameters:
self: TNEGraph *
"""
return _snap.TNEGraph_GetRndEI(self, *args)
def GetNIdV(self, *args):
"""
GetNIdV(TNEGraph self, TIntV NIdV)
Parameters:
NIdV: TIntV &
"""
return _snap.TNEGraph_GetNIdV(self, *args)
def GetEIdV(self, *args):
"""
GetEIdV(TNEGraph self, TIntV EIdV)
Parameters:
EIdV: TIntV &
"""
return _snap.TNEGraph_GetEIdV(self, *args)
def Empty(self):
"""
Empty(TNEGraph self) -> bool
Parameters:
self: TNEGraph const *
"""
return _snap.TNEGraph_Empty(self)
def Clr(self):
"""
Clr(TNEGraph self)
Parameters:
self: TNEGraph *
"""
return _snap.TNEGraph_Clr(self)
def Reserve(self, *args):
"""
Reserve(TNEGraph self, int const & Nodes, int const & Edges)
Parameters:
Nodes: int const &
Edges: int const &
"""
return _snap.TNEGraph_Reserve(self, *args)
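# Illustrative usage sketch for the TNEGraph wrapper above (a directed multigraph with
# explicit edge ids). It assumes the usual `snap` Python package layout; shown as a
# comment because this module only defines the SWIG proxy class.
#
#   import snap
#   G = snap.TNEGraph.New()
#   for nid in (1, 2, 3):
#       G.AddNode(nid)
#   e1 = G.AddEdge(1, 2)            # returns the id of the new edge
#   e2 = G.AddEdge(1, 2)            # parallel edge with its own id
#   assert G.IsEdge(e1) and G.IsEdge(1, 2)
#   ei = G.GetEI(e2)                # edge iterator positioned at edge e2
#   G.DelEdge(e1)                   # delete by edge id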
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Thrift SAI interface basic tests
"""
import switch_sai_thrift
import time
import sys
import logging
import unittest
import random
import sai_base_test
from ptf import config
from ptf.testutils import *
from ptf.thriftutils import *
import os
from switch_sai_thrift.ttypes import *
from switch_sai_thrift.sai_headers import *
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(this_dir, '..'))
from common.utils import *
from common.sai_utils import *
from erspan3 import *
this_dir = os.path.dirname(os.path.abspath(__file__))
cpu_port=64
switch_inited=0
port_list = []
table_attr_list = []
is_bmv2 = ('BMV2_TEST' in os.environ) and (int(os.environ['BMV2_TEST']) == 1)
def switch_init(client):
global switch_inited
if switch_inited:
return
switch_attr_list = client.sai_thrift_get_switch_attribute()
attr_list = switch_attr_list.attr_list
for attribute in attr_list:
if attribute.id == 0:
print "max ports: " + attribute.value.u32
elif attribute.id == 1:
for x in attribute.value.objlist.object_id_list:
port_list.append(x)
else:
print "unknown switch attribute"
attr_value = sai_thrift_attribute_value_t(mac='00:77:66:55:44:33')
attr = sai_thrift_attribute_t(id=SAI_SWITCH_ATTR_SRC_MAC_ADDRESS, value=attr_value)
client.sai_thrift_set_switch_attribute(attr)
switch_inited = 1
class L2AccessToAccessVlanTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending L2 packet port 1 -> port 2 [access vlan=10])"
switch_init(self.client)
vlan_id = 10
port1 = port_list[1]
port2 = port_list[2]
mac1 = '00:11:11:11:11:11'
mac2 = '00:22:22:22:22:22'
mac_action = SAI_PACKET_ACTION_FORWARD
self.client.sai_thrift_create_vlan(vlan_id)
vlan_member1 = sai_thrift_create_vlan_member(self.client, vlan_id, port1, SAI_VLAN_TAGGING_MODE_UNTAGGED)
vlan_member2 = sai_thrift_create_vlan_member(self.client, vlan_id, port2, SAI_VLAN_TAGGING_MODE_UNTAGGED)
sai_thrift_create_fdb(self.client, vlan_id, mac1, port1, mac_action)
sai_thrift_create_fdb(self.client, vlan_id, mac2, port2, mac_action)
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=101,
ip_ttl=64)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, pkt, [1])
finally:
sai_thrift_delete_fdb(self.client, vlan_id, mac1, port1)
sai_thrift_delete_fdb(self.client, vlan_id, mac2, port2)
self.client.sai_thrift_remove_vlan_member(vlan_member1)
self.client.sai_thrift_remove_vlan_member(vlan_member2)
self.client.sai_thrift_delete_vlan(vlan_id)
class L2TrunkToTrunkVlanTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending L2 packet - port 1 -> port 2 [trunk vlan=10])"
switch_init(self.client)
vlan_id = 10
port1 = port_list[1]
port2 = port_list[2]
mac1 = '00:11:11:11:11:11'
mac2 = '00:22:22:22:22:22'
mac_action = SAI_PACKET_ACTION_FORWARD
self.client.sai_thrift_create_vlan(vlan_id)
vlan_member1 = sai_thrift_create_vlan_member(self.client, vlan_id, port1, SAI_VLAN_TAGGING_MODE_TAGGED)
vlan_member2 = sai_thrift_create_vlan_member(self.client, vlan_id, port2, SAI_VLAN_TAGGING_MODE_TAGGED)
sai_thrift_create_fdb(self.client, vlan_id, mac1, port1, mac_action)
sai_thrift_create_fdb(self.client, vlan_id, mac2, port2, mac_action)
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
dl_vlan_enable=True,
vlan_vid=10,
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=102,
dl_vlan_enable=True,
vlan_vid=10,
ip_ttl=64)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_delete_fdb(self.client, vlan_id, mac1, port1)
sai_thrift_delete_fdb(self.client, vlan_id, mac2, port2)
self.client.sai_thrift_remove_vlan_member(vlan_member1)
self.client.sai_thrift_remove_vlan_member(vlan_member2)
self.client.sai_thrift_delete_vlan(vlan_id)
class L2AccessToTrunkVlanTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending L2 packet - port 1 -> port 2 [trunk vlan=10])"
switch_init(self.client)
vlan_id = 10
port1 = port_list[1]
port2 = port_list[2]
mac1 = '00:11:11:11:11:11'
mac2 = '00:22:22:22:22:22'
mac_action = SAI_PACKET_ACTION_FORWARD
self.client.sai_thrift_create_vlan(vlan_id)
vlan_member1 = sai_thrift_create_vlan_member(self.client, vlan_id, port1, SAI_VLAN_TAGGING_MODE_TAGGED)
vlan_member2 = sai_thrift_create_vlan_member(self.client, vlan_id, port2, SAI_VLAN_TAGGING_MODE_UNTAGGED)
sai_thrift_create_fdb(self.client, vlan_id, mac1, port1, mac_action)
sai_thrift_create_fdb(self.client, vlan_id, mac2, port2, mac_action)
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
dl_vlan_enable=True,
vlan_vid=10,
ip_id=102,
ip_ttl=64,
pktlen=104)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_delete_fdb(self.client, vlan_id, mac1, port1)
sai_thrift_delete_fdb(self.client, vlan_id, mac2, port2)
self.client.sai_thrift_remove_vlan_member(vlan_member1)
self.client.sai_thrift_remove_vlan_member(vlan_member2)
self.client.sai_thrift_delete_vlan(vlan_id)
class L2TrunkToAccessVlanTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending L2 packet - port 1 -> port 2 [trunk vlan=10])"
switch_init(self.client)
vlan_id = 10
port1 = port_list[1]
port2 = port_list[2]
mac1 = '00:11:11:11:11:11'
mac2 = '00:22:22:22:22:22'
mac_action = SAI_PACKET_ACTION_FORWARD
self.client.sai_thrift_create_vlan(vlan_id)
vlan_member1 = sai_thrift_create_vlan_member(self.client, vlan_id, port1, SAI_VLAN_TAGGING_MODE_UNTAGGED)
vlan_member2 = sai_thrift_create_vlan_member(self.client, vlan_id, port2, SAI_VLAN_TAGGING_MODE_TAGGED)
sai_thrift_create_fdb(self.client, vlan_id, mac1, port1, mac_action)
sai_thrift_create_fdb(self.client, vlan_id, mac2, port2, mac_action)
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
dl_vlan_enable=True,
vlan_vid=10,
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=102,
ip_ttl=64,
pktlen=96)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_delete_fdb(self.client, vlan_id, mac1, port1)
sai_thrift_delete_fdb(self.client, vlan_id, mac2, port2)
self.client.sai_thrift_remove_vlan_member(vlan_member1)
self.client.sai_thrift_remove_vlan_member(vlan_member2)
self.client.sai_thrift_delete_vlan(vlan_id)
class L2StpTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending L2 packet - port 1 -> port 2 [trunk vlan=10])"
switch_init(self.client)
vlan_id = 10
port1 = port_list[1]
port2 = port_list[2]
mac1 = '00:11:11:11:11:11'
mac2 = '00:22:22:22:22:22'
vlan_list = [vlan_id]
mac_action = SAI_PACKET_ACTION_FORWARD
self.client.sai_thrift_create_vlan(vlan_id)
vlan_member1 = sai_thrift_create_vlan_member(self.client, vlan_id, port1, SAI_VLAN_TAGGING_MODE_UNTAGGED)
vlan_member2 = sai_thrift_create_vlan_member(self.client, vlan_id, port2, SAI_VLAN_TAGGING_MODE_UNTAGGED)
stp_id = sai_thrift_create_stp(self.client, vlan_list)
stp_port_id1 = sai_thrift_create_stp_port(self.client, stp_id, port1, SAI_PORT_STP_STATE_FORWARDING)
stp_port_id2 = sai_thrift_create_stp_port(self.client, stp_id, port2, SAI_PORT_STP_STATE_FORWARDING)
sai_thrift_create_fdb(self.client, vlan_id, mac1, port1, mac_action)
sai_thrift_create_fdb(self.client, vlan_id, mac2, port2, mac_action)
try:
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=113,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=113,
ip_ttl=64)
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
stp_port_id2 = sai_thrift_create_stp_port(self.client, stp_id, port2, SAI_PORT_STP_STATE_FORWARDING)
stp_port_id2 = sai_thrift_create_stp_port(self.client, stp_id, port2, SAI_PORT_STP_STATE_BLOCKING)
print "Sending packet port 1 (blocked) -> port 2 (192.168.0.1 -> 10.0.0.1 [id = 101])"
pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=113,
ip_ttl=64)
exp_pkt = simple_tcp_packet(eth_dst='00:11:11:11:11:11',
eth_src='00:22:22:22:22:22',
ip_dst='10.0.0.1',
ip_id=113,
ip_ttl=64)
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [])
finally:
sai_thrift_delete_fdb(self.client, vlan_id, mac1, port1)
sai_thrift_delete_fdb(self.client, vlan_id, mac2, port2)
self.client.sai_thrift_remove_stp_port(stp_port_id1)
self.client.sai_thrift_remove_stp_port(stp_port_id2)
self.client.sai_thrift_remove_stp(stp_id)
self.client.sai_thrift_remove_vlan_member(vlan_member1)
self.client.sai_thrift_remove_vlan_member(vlan_member2)
self.client.sai_thrift_delete_vlan(vlan_id)
class L3IPv4HostTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending packet port 2 -> port 1 (192.168.0.1 -> 10.10.10.1 [id = 101])"
switch_init(self.client)
port1 = port_list[1]
port2 = port_list[2]
v4_enabled = 1
v6_enabled = 1
mac_valid = 0
mac = ''
vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled)
rif_id1 = sai_thrift_create_router_interface(self.client, vr_id, 1, port1, 0, v4_enabled, v6_enabled, mac)
rif_id2 = sai_thrift_create_router_interface(self.client, vr_id, 1, port2, 0, v4_enabled, v6_enabled, mac)
addr_family = SAI_IP_ADDR_FAMILY_IPV4
ip_addr1 = '10.10.10.1'
ip_mask1 = '255.255.255.255'
dmac1 = '00:11:22:33:44:55'
nhop1 = sai_thrift_create_nhop(self.client, addr_family, ip_addr1, rif_id1)
sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
sai_thrift_create_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1)
# send the test packet(s)
pkt = simple_tcp_packet(eth_dst='00:77:66:55:44:33',
eth_src='00:22:22:22:22:22',
ip_dst='10.10.10.1',
ip_src='192.168.0.1',
ip_id=105,
ip_ttl=64)
exp_pkt = simple_tcp_packet(
eth_dst='00:11:22:33:44:55',
eth_src='00:77:66:55:44:33',
ip_dst='10.10.10.1',
ip_src='192.168.0.1',
ip_id=105,
ip_ttl=63)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_remove_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1)
sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
self.client.sai_thrift_remove_next_hop(nhop1)
self.client.sai_thrift_remove_router_interface(rif_id1)
self.client.sai_thrift_remove_router_interface(rif_id2)
self.client.sai_thrift_remove_virtual_router(vr_id)
class L3IPv4LpmTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending packet port 1 -> port 2 (192.168.0.1 -> 10.10.10.1 [id = 101])"
switch_init(self.client)
port1 = port_list[1]
port2 = port_list[2]
v4_enabled = 1
v6_enabled = 1
mac = ''
vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled)
rif_id1 = sai_thrift_create_router_interface(self.client, vr_id, 1, port1, 0, v4_enabled, v6_enabled, mac)
rif_id2 = sai_thrift_create_router_interface(self.client, vr_id, 1, port2, 0, v4_enabled, v6_enabled, mac)
addr_family = SAI_IP_ADDR_FAMILY_IPV4
ip_addr1 = '10.10.10.1'
ip_mask1 = '255.255.255.0'
dmac1 = '00:11:22:33:44:55'
nhop_ip1 = '20.20.20.1'
nhop1 = sai_thrift_create_nhop(self.client, addr_family, nhop_ip1, rif_id1)
sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
sai_thrift_create_neighbor(self.client, addr_family, rif_id1, nhop_ip1, dmac1)
# send the test packet(s)
pkt = simple_tcp_packet(eth_dst='00:77:66:55:44:33',
eth_src='00:22:22:22:22:22',
ip_dst='10.10.10.1',
ip_src='192.168.0.1',
ip_id=105,
ip_ttl=64)
exp_pkt = simple_tcp_packet(
eth_dst='00:11:22:33:44:55',
eth_src='00:77:66:55:44:33',
ip_dst='10.10.10.1',
ip_src='192.168.0.1',
ip_id=105,
ip_ttl=63)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_remove_neighbor(self.client, addr_family, rif_id1, nhop_ip1, dmac1)
sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
self.client.sai_thrift_remove_next_hop(nhop1)
self.client.sai_thrift_remove_router_interface(rif_id1)
self.client.sai_thrift_remove_router_interface(rif_id2)
self.client.sai_thrift_remove_virtual_router(vr_id)
class L3IPv6HostTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending packet port 1 -> port 2 (fc00:e968:6179::de52:7100 -> fdf8:f53e:61e4::18)"
switch_init(self.client)
port1 = port_list[1]
port2 = port_list[2]
v4_enabled = 1
v6_enabled = 1
mac = ''
vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled)
rif_id1 = sai_thrift_create_router_interface(self.client, vr_id, 1, port1, 0, v4_enabled, v6_enabled, mac)
rif_id2 = sai_thrift_create_router_interface(self.client, vr_id, 1, port2, 0, v4_enabled, v6_enabled, mac)
addr_family = SAI_IP_ADDR_FAMILY_IPV6
ip_addr1 = 'fdf8:f53e:61e4::18'
ip_mask1 = 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'
dmac1 = '00:11:22:33:44:55'
nhop1 = sai_thrift_create_nhop(self.client, addr_family, ip_addr1, rif_id1)
sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
sai_thrift_create_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1)
# send the test packet(s)
pkt = simple_tcpv6_packet( eth_dst='00:77:66:55:44:33',
eth_src='00:22:22:22:22:22',
ipv6_dst='fdf8:f53e:61e4::18',
ipv6_src='fc00:e968:6179::de52:7100',
ipv6_hlim=64)
exp_pkt = simple_tcpv6_packet(
eth_dst='00:11:22:33:44:55',
eth_src='00:77:66:55:44:33',
ipv6_dst='fdf8:f53e:61e4::18',
ipv6_src='fc00:e968:6179::de52:7100',
ipv6_hlim=63)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_remove_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1)
sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
self.client.sai_thrift_remove_next_hop(nhop1)
self.client.sai_thrift_remove_router_interface(rif_id1)
self.client.sai_thrift_remove_router_interface(rif_id2)
self.client.sai_thrift_remove_virtual_router(vr_id)
class L3IPv6LpmTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "IPv6 Lpm Test"
print "Sending packet port 2 -> port 1 (fc00:e968:6179::de52:7100 -> fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b, routing with 1234:5678:9abc:def0:4422:1133:5577:9900/120 route"
switch_init(self.client)
port1 = port_list[1]
port2 = port_list[2]
v4_enabled = 1
v6_enabled = 1
mac = ''
vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled)
rif_id1 = sai_thrift_create_router_interface(self.client, vr_id, 1, port1, 0, v4_enabled, v6_enabled, mac)
rif_id2 = sai_thrift_create_router_interface(self.client, vr_id, 1, port2, 0, v4_enabled, v6_enabled, mac)
addr_family = SAI_IP_ADDR_FAMILY_IPV6
ip_addr1 = 'fdf8:f53e:61e4::18'
ip_mask1 = 'fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b'
dmac1 = '00:11:22:33:44:55'
nhop_ip1 = 'fdf8:f53e:61e4::18'
nhop1 = sai_thrift_create_nhop(self.client, addr_family, nhop_ip1, rif_id1)
sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
sai_thrift_create_neighbor(self.client, addr_family, rif_id1, nhop_ip1, dmac1)
# send the test packet(s)
pkt = simple_tcpv6_packet( eth_dst='00:77:66:55:44:33',
eth_src='00:22:22:22:22:22',
ipv6_dst='fdf8:f53e:61e4::18',
ipv6_src='fc00:e968:6179::de52:7100',
ipv6_hlim=64)
exp_pkt = simple_tcpv6_packet(
eth_dst='00:11:22:33:44:55',
eth_src='00:77:66:55:44:33',
ipv6_dst='fdf8:f53e:61e4::18',
ipv6_src='fc00:e968:6179::de52:7100',
ipv6_hlim=63)
try:
send_packet(self, 2, str(pkt))
verify_packets(self, exp_pkt, [1])
finally:
sai_thrift_remove_neighbor(self.client, addr_family, rif_id1, nhop_ip1, dmac1)
sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr1, ip_mask1, nhop1)
self.client.sai_thrift_remove_next_hop(nhop1)
self.client.sai_thrift_remove_router_interface(rif_id1)
self.client.sai_thrift_remove_router_interface(rif_id2)
self.client.sai_thrift_remove_virtual_router(vr_id)
class L3IPv4EcmpHostTest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
print
print "Sending packet port 1 -> port 2 (192.168.0.1 -> 10.10.10.1 [id = 101])"
switch_init(self.client)
port1 = port_list[1]
port2 = port_list[2]
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
import torch
from PIL import Image
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torchvision import transforms as T
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import has_file_allowed_extension, IMG_EXTENSIONS, make_dataset
from flash.core.classification import ClassificationDataPipeline
from flash.core.data.datamodule import DataModule
from flash.core.data.utils import _contains_any_tensor
def _pil_loader(path) -> Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, "rb") as f, Image.open(f) as img:
return img.convert("RGB")
class FilepathDataset(torch.utils.data.Dataset):
"""Dataset that takes in filepaths and labels."""
def __init__(
self,
filepaths: Optional[Sequence[Union[str, pathlib.Path]]],
labels: Optional[Sequence],
loader: Callable,
transform: Optional[Callable] = None,
):
"""
Args:
filepaths: file paths to load with :attr:`loader`
labels: the labels corresponding to the :attr:`filepaths`.
Each unique value will get a class index by sorting them.
loader: the function to load an image from a given file path
transform: the transforms to apply to the loaded images
"""
self.fnames = filepaths or []
self.labels = labels or []
self.transform = transform
self.loader = loader
if self.has_labels:
self.label_to_class_mapping = {v: k for k, v in enumerate(sorted(set(self.labels)))}
@property
def has_labels(self) -> bool:
return len(self.labels) > 0
def __len__(self) -> int:
return len(self.fnames)
def __getitem__(self, index: int) -> Tuple[Any, Optional[int]]:
filename = self.fnames[index]
img = self.loader(filename)
label = None
if self.has_labels:
label = self.label_to_class_mapping[self.labels[index]]
return img, label
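# Illustrative usage sketch for FilepathDataset: the file names below are made up and
# the identity "loader" stands in for _pil_loader so that no image files are needed.
def _demo_filepath_dataset():
    demo_ds = FilepathDataset(
        filepaths=["img_0.png", "img_1.png", "img_2.png"],
        labels=["cat", "dog", "cat"],
        loader=lambda path: path,  # stand-in loader, returns the path itself
    )
    # unique labels are sorted, so "cat" -> 0 and "dog" -> 1
    assert demo_ds.label_to_class_mapping == {"cat": 0, "dog": 1}
    assert demo_ds[1] == ("img_1.png", 1)
    return demo_ds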
class FlashDatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root: Root directory path.
loader: A function to load a sample given its path.
extensions: A list of allowed extensions. Both extensions
and is_valid_file should not be passed.
transform: A function/transform that takes in
a sample and returns a transformed version.
E.g., ``transforms.RandomCrop`` for images.
target_transform: A function/transform that takes
in the target and transforms it.
is_valid_file: A function that takes the path of a file
and checks whether it is a valid file (used to filter out corrupt files).
Both extensions and is_valid_file should not be passed.
with_targets: Whether to include targets
img_paths: List of image paths to load. Only used when ``with_targets=False``
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root: str,
loader: Callable,
extensions: Tuple[str] = IMG_EXTENSIONS,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable] = None,
with_targets: bool = True,
img_paths: Optional[List[str]] = None,
):
super(FlashDatasetFolder, self).__init__(root, transform=transform, target_transform=target_transform)
self.loader = loader
self.extensions = extensions
self.with_targets = with_targets
if with_targets:
classes, class_to_idx = self._find_classes(self.root)
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
else:
if not img_paths:
raise MisconfigurationException(
"`FlashDatasetFolder(with_target=False)` but no `img_paths` were provided"
)
self.samples = img_paths
def _find_classes(self, dir):
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
if self.with_targets:
path, target = self.samples[index]
if self.target_transform is not None:
target = self.target_transform(target)
else:
path = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
return (sample, target) if self.with_targets else sample
def __len__(self) -> int:
return len(self.samples)
_default_train_transforms = T.Compose([
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
_default_valid_transforms = T.Compose([
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
# todo: torch.nn.modules.module.ModuleAttributeError: 'Resize' object has no attribute '_forward_hooks'
# Find better fix and raised issue on torchvision.
_default_valid_transforms.transforms[0]._forward_hooks = {}
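# Illustrative only: the default eval transforms above resize, center-crop to 224x224,
# convert to a tensor and apply the standard ImageNet normalization, so any RGB PIL
# image comes out as a (3, 224, 224) float tensor.
def _demo_default_transforms():
    dummy = Image.new("RGB", (500, 375))
    out = _default_valid_transforms(dummy)
    assert tuple(out.shape) == (3, 224, 224)
    return out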
class ImageClassificationDataPipeline(ClassificationDataPipeline):
def __init__(
self,
train_transform: Optional[Callable] = _default_train_transforms,
valid_transform: Optional[Callable] = _default_valid_transforms,
use_valid_transform: bool = True,
loader: Callable = _pil_loader
):
self._train_transform = train_transform
self._valid_transform = valid_transform
self._use_valid_transform = use_valid_transform
self._loader = loader
def before_collate(self, samples: Any) -> Any:
if _contains_any_tensor(samples):
return samples
if isinstance(samples, str):
samples = [samples]
if isinstance(samples, (list, tuple)) and all(isinstance(p, str) for p in samples):
outputs = []
for sample in samples:
output = self._loader(sample)
transform = self._valid_transform if self._use_valid_transform else self._train_transform
outputs.append(transform(output))
return outputs
raise MisconfigurationException("The samples should either be a tensor or a list of paths.")
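# Illustrative only: before_collate leaves tensor inputs untouched and turns lists of
# file paths into transformed images via the configured loader. The loader below is a
# stub that fabricates a blank image, so no files are actually read.
def _demo_before_collate():
    pipeline = ImageClassificationDataPipeline(loader=lambda path: Image.new("RGB", (256, 256)))
    already_tensor = torch.rand(2, 3, 224, 224)
    assert pipeline.before_collate(already_tensor) is already_tensor
    out = pipeline.before_collate(["fake_a.png", "fake_b.png"])
    assert len(out) == 2 and tuple(out[0].shape) == (3, 224, 224)
    return out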
class ImageClassificationData(DataModule):
"""Data module for image classification tasks."""
@classmethod
def from_filepaths(
cls,
train_filepaths: Optional[Sequence[Union[str, pathlib.Path]]] = None,
train_labels: Optional[Sequence] = None,
train_transform: Optional[Callable] = _default_train_transforms,
valid_filepaths: Optional[Sequence[Union[str, pathlib.Path]]] = None,
valid_labels: Optional[Sequence] = None,
valid_transform: Optional[Callable] = _default_valid_transforms,
test_filepaths: Optional[Sequence[Union[str, pathlib.Path]]] = None,
test_labels: Optional[Sequence] = None,
loader: Callable = _pil_loader,
batch_size: int = 64,
num_workers: Optional[int] = None,
**kwargs
):
"""Creates a ImageClassificationData object from lists of image filepaths and labels
Args:
train_filepaths: sequence of file paths for training dataset. Defaults to None.
train_labels: sequence of labels for training dataset. Defaults to None.
train_transform: transforms for training dataset. Defaults to None.
valid_filepaths: sequence of file paths for validation dataset. Defaults to None.
valid_labels: sequence of labels for validation dataset. Defaults to None.
valid_transform: transforms for validation and testing dataset. Defaults to None.
test_filepaths: sequence of file paths for test dataset. Defaults to None.
test_labels: sequence of labels for test dataset. Defaults to None.
loader: function to load an image file. Defaults to None.
batch_size: the batchsize to use for parallel loading. Defaults to 64.
num_workers: The number of workers to use for parallelized loading.
Defaults to None which equals the number of available CPU threads.
Returns:
ImageClassificationData: The constructed data module.
Examples:
>>> img_data = ImageClassificationData.from_filepaths(["a.png", "b.png"], [0, 1]) # doctest: +SKIP
"""
train_ds = FilepathDataset(
filepaths=train_filepaths,
labels=train_labels,
loader=loader,
transform=train_transform,
)
valid_ds = (
FilepathDataset(
filepaths=valid_filepaths,
labels=valid_labels,
loader=loader,
transform=valid_transform,
) if valid_filepaths is not None else None
)
test_ds = (
FilepathDataset(
filepaths=test_filepaths,
labels=test_labels,
loader=loader,
transform=valid_transform,
) if test_filepaths is not None else None
)
return cls(
train_ds=train_ds,
valid_ds=valid_ds,
test_ds=test_ds,
batch_size=batch_size,
num_workers=num_workers,
)
@classmethod
def from_folders(
cls,
train_folder: Optional[Union[str, pathlib.Path]],
train_transform: Optional[Callable] = _default_train_transforms,
valid_folder: Optional[Union[str, pathlib.Path]] = None,
valid_transform: Optional[Callable] = _default_valid_transforms,
test_folder: Optional[Union[str, pathlib.Path]] = None,
loader: Callable = _pil_loader,
batch_size: int = 4,
num_workers: Optional[int] = None,
**kwargs
):
"""
Creates an ImageClassificationData object from folders of images arranged in this way: ::
train/dog/xxx.png
train/dog/xxy.png
train/dog/xxz.png
train/cat/123.png
train/cat/nsdf3.png
train/cat/asd932.png
Args:
train_folder: Path to training folder.
train_transform: Image transform to use for training set.
valid_folder: Path to validation folder.
valid_transform: Image transform to use for validation and test set.
test_folder: Path to test folder.
loader: A function to load an image given its path.
batch_size: Batch size for data loading.
num_workers: The number of workers to use for parallelized loading.
Defaults to None which equals the number of available CPU threads.
Returns:
ImageClassificationData: the constructed data module
Examples:
>>> img_data = ImageClassificationData.from_folders("train/") # doctest: +SKIP
"""
train_ds = FlashDatasetFolder(train_folder, transform=train_transform, loader=loader)
valid_ds = (
FlashDatasetFolder(valid_folder, transform=valid_transform, loader=loader)
if valid_folder is not None else None
)
test_ds = (
FlashDatasetFolder(test_folder, transform=valid_transform, loader=loader)
if test_folder is not None else None
)
datamodule = cls(
train_ds=train_ds,
valid_ds=valid_ds,
test_ds=test_ds,
batch_size=batch_size,
num_workers=num_workers,
)
datamodule.num_classes = len(train_ds.classes)
datamodule.data_pipeline = ImageClassificationDataPipeline(
train_transform=train_transform, valid_transform=valid_transform, loader=loader
)
return datamodule
"""
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from functools import partial
import logging
import cocotb
from cocotb.queue import Queue
from cocotb.triggers import Event, First, Timer, NullTrigger
from cocotb.utils import get_sim_time, get_sim_steps
from .dllp import Dllp, DllpType, FcType
from .tlp import Tlp
PCIE_GEN_RATE = {
1: 2.5e9*8/10,
2: 5e9*8/10,
3: 8e9*128/130,
4: 16e9*128/130,
5: 32e9*128/130,
}
PCIE_GEN_SYMB_TIME = {
1: 10/PCIE_GEN_RATE[1],
2: 10/PCIE_GEN_RATE[2],
3: 8/PCIE_GEN_RATE[3],
4: 8/PCIE_GEN_RATE[4],
5: 8/PCIE_GEN_RATE[5],
}
class FcStateData:
def __init__(self, init=0, *args, **kwargs):
self.__dict__.setdefault('_base_field_size', 12)
self.tx_field_size = self._base_field_size
self.tx_field_range = 2**self.tx_field_size
self.tx_field_mask = self.tx_field_range-1
self.rx_field_size = self._base_field_size
self.rx_field_range = 2**self.rx_field_size
self.rx_field_mask = self.rx_field_range-1
init = max(init, 0)
# Initial allocation of credits from receiver
self.tx_initial_allocation = 0
# Count of total number of FC units consumed at transmitter
self.tx_credits_consumed = 0
# Most recent number of FC units advertised by receiver
self.tx_credit_limit = 0
# Initial allocation of credits at receiver
self.rx_initial_allocation = init
# Total number of units granted to transmitter
self.rx_credits_allocated = init
# Total number of FC units consumed at receiver
self.rx_credits_received = 0
if init >= 2**(self._base_field_size-1):
raise ValueError("Initial credit allocation out of range")
super().__init__(*args, **kwargs)
def reset(self):
self.tx_initial_allocation = 0
self.tx_credits_consumed = 0
self.tx_credit_limit = 0
self.rx_credits_allocated = self.rx_initial_allocation
self.rx_credits_received = 0
@property
def tx_credits_available(self):
if self.tx_is_infinite():
return self.tx_field_mask
else:
return (self.tx_credit_limit - self.tx_credits_consumed) & self.tx_field_mask
@property
def rx_credits_available(self):
if self.rx_is_infinite():
return self.rx_field_mask
else:
return (self.rx_credits_allocated - self.rx_credits_received) & self.rx_field_mask
def tx_is_infinite(self):
return self.tx_initial_allocation == 0
def rx_is_infinite(self):
return self.rx_initial_allocation == 0
def tx_consume_fc(self, fc):
if not self.tx_is_infinite():
self.tx_credits_consumed = (self.tx_credits_consumed + fc) & self.tx_field_mask
# assert self.tx_credits_available < self.tx_field_range // 2
def rx_consume_fc(self, fc):
if not self.rx_is_infinite():
self.rx_credits_received = (self.rx_credits_received + fc) & self.rx_field_mask
# assert self.rx_credits_available < self.rx_field_range // 2
def rx_release_fc(self, fc):
if not self.rx_is_infinite():
self.rx_credits_allocated = (self.rx_credits_allocated + fc) & self.rx_field_mask
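# Illustrative only: the credit counters above are modulo-2**field_size, as in the PCIe
# flow-control protocol, so the "credits available" computation stays correct even after
# the running counters wrap around. tx_credit_limit is normally written by handle_fc_dllp;
# it is forced by hand here purely for the demonstration.
def _demo_fc_counter_wraparound():
    fc = FcStateData(init=64)
    fc.tx_initial_allocation = 64                       # pretend InitFC advertised 64 credits
    fc.tx_credits_consumed = fc.tx_field_range - 10     # transmitter close to wraparound
    fc.tx_credit_limit = (fc.tx_credits_consumed + 5) & fc.tx_field_mask
    assert fc.tx_credits_available == 5
    return fc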
class FcStateHeader(FcStateData):
def __init__(self, *args, **kwargs):
self._base_field_size = 8
super().__init__(*args, **kwargs)
class FcChannelState:
def __init__(self, init=[0]*6, start_fc_update_timer=None):
self.ph = FcStateHeader(init[0])
self.pd = FcStateData(init[1])
self.nph = FcStateHeader(init[2])
self.npd = FcStateData(init[3])
self.cplh = FcStateHeader(init[4])
self.cpld = FcStateData(init[5])
self.active = False
self.fi1 = False
self.fi1p = False
self.fi1np = False
self.fi1cpl = False
self.fi2 = False
self.initialized = Event()
self.fc_p_update = Event()
self.fc_np_update = Event()
self.fc_cpl_update = Event()
self.start_fc_update_timer = start_fc_update_timer
self.next_fc_p_tx = 0
self.next_fc_np_tx = 0
self.next_fc_cpl_tx = 0
self.rx_release_fc_dict = {}
def reset(self):
self.ph.reset()
self.pd.reset()
self.nph.reset()
self.npd.reset()
self.cplh.reset()
self.cpld.reset()
self.active = False
self.fi1 = False
self.fi1p = False
self.fi1np = False
self.fi1cpl = False
self.fi2 = False
self.initialized.clear()
self.next_fc_p_tx = 0
self.next_fc_np_tx = 0
self.next_fc_cpl_tx = 0
self.rx_release_fc_dict = {}
def tx_has_credit(self, credit_type, dc=0):
if credit_type == FcType.P:
return self.ph.tx_credits_available > 0 and self.pd.tx_credits_available >= dc
elif credit_type == FcType.NP:
return self.nph.tx_credits_available > 0 and self.npd.tx_credits_available >= dc
elif credit_type == FcType.CPL:
return self.cplh.tx_credits_available > 0 and self.cpld.tx_credits_available >= dc
def tx_tlp_has_credit(self, tlp):
return self.tx_has_credit(tlp.get_fc_type(), tlp.get_data_credits())
def tx_consume_fc(self, credit_type, dc=0):
if credit_type == FcType.P:
self.ph.tx_consume_fc(1)
self.pd.tx_consume_fc(dc)
elif credit_type == FcType.NP:
self.nph.tx_consume_fc(1)
self.npd.tx_consume_fc(dc)
elif credit_type == FcType.CPL:
self.cplh.tx_consume_fc(1)
self.cpld.tx_consume_fc(dc)
def tx_consume_tlp_fc(self, tlp):
self.tx_consume_fc(tlp.get_fc_type(), tlp.get_data_credits())
async def tx_tlp_fc_gate(self, tlp):
credit_type = tlp.get_fc_type()
dc = tlp.get_data_credits()
await self.initialized.wait()
while not self.tx_has_credit(credit_type, dc):
if credit_type == FcType.P:
self.fc_p_update.clear()
await self.fc_p_update.wait()
elif credit_type == FcType.NP:
self.fc_np_update.clear()
await self.fc_np_update.wait()
elif credit_type == FcType.CPL:
self.fc_cpl_update.clear()
await self.fc_cpl_update.wait()
self.tx_consume_fc(credit_type, dc)
def rx_consume_fc(self, credit_type, dc=0):
if credit_type == FcType.P:
self.ph.rx_consume_fc(1)
self.pd.rx_consume_fc(dc)
elif credit_type == FcType.NP:
self.nph.rx_consume_fc(1)
self.npd.rx_consume_fc(dc)
elif credit_type == FcType.CPL:
self.cplh.rx_consume_fc(1)
self.cpld.rx_consume_fc(dc)
def rx_consume_tlp_fc(self, tlp):
self.rx_consume_fc(tlp.get_fc_type(), tlp.get_data_credits())
def rx_release_fc(self, credit_type, dc=0):
if credit_type == FcType.P:
self.next_fc_p_tx = 0
self.ph.rx_release_fc(1)
self.pd.rx_release_fc(dc)
elif credit_type == FcType.NP:
self.next_fc_np_tx = 0
self.nph.rx_release_fc(1)
self.npd.rx_release_fc(dc)
elif credit_type == FcType.CPL:
self.next_fc_cpl_tx = 0
self.cplh.rx_release_fc(1)
self.cpld.rx_release_fc(dc)
self.start_fc_update_timer()
def rx_release_tlp_fc(self, tlp):
self.rx_release_fc(tlp.get_fc_type(), tlp.get_data_credits())
def rx_release_fc_token(self, token):
if token in self.rx_release_fc_dict:
credit_type, dc = self.rx_release_fc_dict.pop(token)
self.rx_release_fc(credit_type, dc)
def rx_set_tlp_release_fc_cb(self, tlp):
credit_type = tlp.get_fc_type()
dc = tlp.get_data_credits()
token = object()
self.rx_release_fc_dict[token] = (credit_type, dc)
tlp.release_fc_cb = partial(self.rx_release_fc_token, token)
def rx_process_tlp_fc(self, tlp):
self.rx_consume_tlp_fc(tlp)
self.rx_set_tlp_release_fc_cb(tlp)
def handle_fc_dllp(self, dllp):
# Handle flow control DLLPs for this VC
if not self.active:
return
if not self.fi1:
# FC_INIT1
if dllp.type in {DllpType.INIT_FC1_P, DllpType.INIT_FC1_NP, DllpType.INIT_FC1_CPL,
DllpType.INIT_FC2_P, DllpType.INIT_FC2_NP, DllpType.INIT_FC2_CPL}:
# capture initial credit limit values from InitFC1 and InitFC2 DLLPs
if dllp.type in {DllpType.INIT_FC1_P, DllpType.INIT_FC2_P}:
self.ph.tx_initial_allocation = dllp.hdr_fc
self.ph.tx_credit_limit = dllp.hdr_fc
self.pd.tx_initial_allocation = dllp.data_fc
self.pd.tx_credit_limit = dllp.data_fc
self.fi1p = True
elif dllp.type in {DllpType.INIT_FC1_NP, DllpType.INIT_FC2_NP}:
self.nph.tx_initial_allocation = dllp.hdr_fc
self.nph.tx_credit_limit = dllp.hdr_fc
self.npd.tx_initial_allocation = dllp.data_fc
self.npd.tx_credit_limit = dllp.data_fc
self.fi1np = True
elif dllp.type in {DllpType.INIT_FC1_CPL, DllpType.INIT_FC2_CPL}:
self.cplh.tx_initial_allocation = dllp.hdr_fc
self.cplh.tx_credit_limit = dllp.hdr_fc
self.cpld.tx_initial_allocation = dllp.data_fc
self.cpld.tx_credit_limit = dllp.data_fc
self.fi1cpl = True
# exit FC_INIT1 once all credit types have been initialized
self.fi1 = self.fi1p and self.fi1np and self.fi1cpl
elif not self.fi2:
# FC_INIT2
if dllp.type in {DllpType.INIT_FC2_P, DllpType.INIT_FC2_NP, DllpType.INIT_FC2_CPL,
DllpType.UPDATE_FC_P, DllpType.UPDATE_FC_NP, DllpType.UPDATE_FC_CPL}:
# exit FC_INIT2 on receipt of any InitFC2 or UpdateFC DLLP; ignore values
self.fi2 = True
self.initialized.set()
else:
# normal operation
# capture new credit limits from UpdateFC DLLPs
if dllp.type == DllpType.UPDATE_FC_P:
if self.ph.tx_is_infinite():
assert dllp.hdr_fc == 0
if self.pd.tx_is_infinite():
assert dllp.data_fc == 0
self.ph.tx_credit_limit = dllp.hdr_fc
self.pd.tx_credit_limit = dllp.data_fc
self.fc_p_update.set()
elif dllp.type == DllpType.UPDATE_FC_NP:
if self.nph.tx_is_infinite():
assert dllp.hdr_fc == 0
if self.npd.tx_is_infinite():
assert dllp.data_fc == 0
self.nph.tx_credit_limit = dllp.hdr_fc
self.npd.tx_credit_limit = dllp.data_fc
self.fc_np_update.set()
elif dllp.type == DllpType.UPDATE_FC_CPL:
if self.cplh.tx_is_infinite():
assert dllp.hdr_fc == 0
if self.cpld.tx_is_infinite():
assert dllp.data_fc == 0
self.cplh.tx_credit_limit = dllp.hdr_fc
self.cpld.tx_credit_limit = dllp.data_fc
self.fc_cpl_update.set()
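# Illustrative only: drives a single FcChannelState through FC_INIT1/FC_INIT2 and then a
# credit update, setting the Dllp fields the same way the transmit path below does.
def _demo_fc_init_sequence():
    ch = FcChannelState(init=[1, 2, 1, 2, 1, 2], start_fc_update_timer=lambda: None)
    ch.active = True
    for dllp_type in (DllpType.INIT_FC1_P, DllpType.INIT_FC1_NP, DllpType.INIT_FC1_CPL):
        dllp = Dllp()
        dllp.type = dllp_type
        dllp.hdr_fc = 8
        dllp.data_fc = 32
        ch.handle_fc_dllp(dllp)              # FC_INIT1: capture initial credit limits
    assert ch.fi1 and not ch.fi2
    update = Dllp()
    update.type = DllpType.UPDATE_FC_P       # any InitFC2/UpdateFC DLLP completes FC_INIT2
    update.hdr_fc = 16
    update.data_fc = 64
    ch.handle_fc_dllp(update)
    assert ch.fi2 and ch.initialized.is_set()
    return ch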
class Port:
"""Base port"""
def __init__(self, fc_init=[[0]*6]*8, *args, **kwargs):
self.log = logging.getLogger(f"cocotb.pcie.{type(self).__name__}.{id(self)}")
self.log.name = f"cocotb.pcie.{type(self).__name__}"
self.parent = None
self.rx_handler = None
self.max_link_speed = None
self.max_link_width = None
self.tx_queue = Queue(1)
self.tx_queue_sync = Event()
self.rx_queue = Queue()
self.cur_link_speed = None
self.cur_link_width = None
self.time_scale = get_sim_steps(1, 'sec')
# ACK/NAK protocol
# TX
self.next_transmit_seq = 0x000
self.ackd_seq = 0xfff
self.retry_buffer = Queue()
# RX
self.next_recv_seq = 0x000
self.nak_scheduled = False
self.ack_nak_latency_timer_steps = 0
self.max_payload_size = 128
self.max_latency_timer_steps = 0
self.send_ack = Event()
self._ack_latency_timer_cr = None
# Flow control
self.send_fc = Event()
self.fc_state = [FcChannelState(fc_init[k], self.start_fc_update_timer) for k in range(8)]
self.fc_initialized = False
self.fc_init_vc = 0
self.fc_init_type = FcType.P
self.fc_idle_timer_steps = get_sim_steps(10, 'us')
self.fc_update_steps = get_sim_steps(30, 'us')
self._fc_update_timer_cr = None
super().__init__(*args, **kwargs)
# VC0 is always active
self.fc_state[0].active = True
cocotb.start_soon(self._run_transmit())
cocotb.start_soon(self._run_receive())
cocotb.start_soon(self._run_fc_update_idle_timer())
def classify_tlp_vc(self, tlp):
return 0
async def send(self, pkt):
pkt.release_fc()
await self.fc_state[self.classify_tlp_vc(pkt)].tx_tlp_fc_gate(pkt)
await self.tx_queue.put(pkt)
self.tx_queue_sync.set()
async def _run_transmit(self):
await NullTrigger()
while True:
while self.tx_queue.empty() and not self.send_ack.is_set() and not self.send_fc.is_set() and self.fc_initialized:
self.tx_queue_sync.clear()
await First(self.tx_queue_sync.wait(), self.send_ack.wait(), self.send_fc.wait())
pkt = None
if self.send_ack.is_set():
# Send ACK or NAK DLLP
# Runs when
# - ACK timer expires
# - ACK/NAK transmit requested
self.send_ack.clear()
if self.nak_scheduled:
pkt = Dllp.create_nak((self.next_recv_seq-1) & 0xfff)
else:
pkt = Dllp.create_ack((self.next_recv_seq-1) & 0xfff)
elif self.send_fc.is_set() or (not self.fc_initialized and self.tx_queue.empty()):
# Send FC DLLP
# Runs when
# - FC timer expires
# - FC update DLLP transmit requested
# - FC init is not done AND no TLPs are queued for transmit
if self.send_fc.is_set():
# Send FC update DLLP
for fc_ch in self.fc_state:
if not fc_ch.active or not fc_ch.fi2:
continue
sim_time = get_sim_time()
if fc_ch.next_fc_p_tx <= sim_time:
pkt = Dllp()
pkt.vc = self.fc_init_vc
pkt.type = DllpType.UPDATE_FC_P
pkt.hdr_fc = fc_ch.ph.rx_credits_allocated
pkt.data_fc = fc_ch.pd.rx_credits_allocated
fc_ch.next_fc_p_tx = sim_time + self.fc_update_steps
break
if fc_ch.next_fc_np_tx <= sim_time:
pkt = Dllp()
pkt.vc = self.fc_init_vc
pkt.type = DllpType.UPDATE_FC_NP
pkt.hdr_fc = fc_ch.nph.rx_credits_allocated
pkt.data_fc = fc_ch.npd.rx_credits_allocated
fc_ch.next_fc_np_tx = sim_time + self.fc_update_steps
break
# Src/Tests/test_interactive.py
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
skiptest("silverlight")
skiptest("win32")
from iptest.console_util import IronPythonInstance
remove_ironpython_dlls(testpath.public_testdir)
from sys import executable
from System import Environment
from sys import exec_prefix
extraArgs = ""
if "-X:LightweightScopes" in Environment.GetCommandLineArgs():
extraArgs += " -X:LightweightScopes"
def test_strings():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# String exception
response = ipi.ExecuteLine("raise 'foo'", True)
AreEqual(response.replace("\r\r\n", "\n").replace("\r", ""),
"""Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: exceptions must be classes or instances, not str""")
# Multi-line string literal
ipi.ExecutePartialLine("\"\"\"Hello")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
AreEqual("'Hello\\n\\n\\nWorld'", ipi.ExecuteLine("World\"\"\""))
ipi.ExecutePartialLine("if False: print 3")
ipi.ExecutePartialLine("else: print 'hello'")
AreEqual(r'hello', ipi.ExecuteLine(""))
# Empty line
AreEqual("", ipi.ExecuteLine(""))
ipi.End()
def test_exceptions():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
response = ipi.ExecuteLine("raise Exception", True)
AreEqual(response,
'''Traceback (most recent call last):
File "<stdin>", line 1, in <module>
Exception'''.replace("\n", "\r\r\n") + "\r")
ipi.End()
def test_exceptions_nested():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("def a(): return b()")
ipi.ExecuteLine("")
ipi.ExecutePartialLine("def b(): return 1/0")
ipi.ExecuteLine("")
response = ipi.ExecuteLine("a()", True)
response = response.replace("\r\r\n", "\n").strip()
Assert(response.startswith('''Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 1, in a
File "<stdin>", line 1, in b
ZeroDivisionError:'''), response)
ipi.End()
###############################################################################
# Test "ipy.exe -i script.py"
def test_interactive_mode():
inputScript = testpath.test_inputs_dir + "\\simpleCommand.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
AreEqual(ipi.Start(), True)
ipi.EnsureInteractive()
AreEqual("1", ipi.ExecuteLine("x"))
ipi.End()
inputScript = testpath.test_inputs_dir + "\\raise.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
AreEqual(ipi.Start(), True)
ipi.ReadError()
ipi.EnsureInteractive()
AreEqual("1", ipi.ExecuteLine("x"))
ipi.End()
inputScript = testpath.test_inputs_dir + "\\syntaxError.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
AreEqual(ipi.Start(), True)
# ipi.EnsureInteractive()
AssertContains(ipi.ExecuteLine("x", True), "NameError")
ipi.End()
inputScript = testpath.test_inputs_dir + "\\exit.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i \"" + inputScript + "\"")
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
ipi.End()
# interactive + -c
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -i -c x=2")
AreEqual(ipi.Start(), True)
ipi.EnsureInteractive()
Assert(ipi.ExecuteLine("x", True).find("2") != -1)
ipi.End()
###############################################################################
# Test sys.exitfunc
def test_sys_exitfunc():
import clr
inputScript = testpath.test_inputs_dir + "\\exitFuncRuns.py"
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " \"" + inputScript + "\"")
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
AreEqual(output.find('hello world') > -1, True)
ipi.End()
args = extraArgs
if clr.GetCurrentRuntime().Configuration.DebugMode:
args = "-D " + args
inputScript = testpath.test_inputs_dir + "\\exitFuncRaises.py"
ipi = IronPythonInstance(executable, exec_prefix, args + " \"" + inputScript + "\"")
(result, output, output2, exitCode) = ipi.StartAndRunToCompletion()
AreEqual(exitCode, 0)
AreEqual(output2.find('Error in sys.exitfunc:') > -1, True)
AreEqual(output2.find('exitFuncRaises.py", line 19, in foo') > -1, True)
ipi.End()
# verify sys.exit(True) and sys.exit(False) return 1 and 0
ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(False)"')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '') # no std out
AreEqual(res[2], '') # no std err
AreEqual(res[3], 0) # should return 0
ipi = IronPythonInstance(executable, exec_prefix, '-c "import sys; sys.exit(True)"')
res = ipi.StartAndRunToCompletion()
AreEqual(res[0], True) # should have started
AreEqual(res[1], '') # no std out
AreEqual(res[2], '') # no std err
AreEqual(res[3], 1) # should return 0
# and verify it works at the interactive console as well
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
AreEqual(ipi.ExecuteAndExit("sys.exit(False)"), 0)
# and verify it works at the interactive console as well
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
# parameterless exception
ipi.ExecuteLine("import sys")
AreEqual(ipi.ExecuteAndExit("sys.exit(True)"), 1)
#############################################################################
# verify we need to dedent to a previous valid indentation level
def test_indentation():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("if False:")
ipi.ExecutePartialLine(" print 'hello'")
response = ipi.ExecuteLine(" print 'goodbye'", True)
AreEqual(response.find('IndentationError') > 1, True)
ipi.End()
#############################################################################
# verify we dump exception details
def test_dump_exception():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs + " -X:ExceptionDetail")
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("raise 'goodbye'", True)
AreEqual(response.count("IronPython.Hosting") >= 1, True)
ipi.End()
#############################################################################
# make sure we can enter try/except blocks
def test_try_except():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("try:")
ipi.ExecutePartialLine(" raise Exception('foo')")
ipi.ExecutePartialLine("except Exception, e:")
ipi.ExecutePartialLine(" if e.message=='foo':")
ipi.ExecutePartialLine(" print 'okay'")
response = ipi.ExecuteLine("")
Assert(response.find('okay') > -1)
ipi.End()
###########################################################
# Throw on "complete" incomplete syntax bug #864
def test_incomplate_syntax():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class K:")
response = ipi.ExecuteLine("", True)
Assert("IndentationError:" in response)
ipi.End()
def test_incomplate_syntax_backslash():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
for i in xrange(4):
for j in xrange(i):
ipi.ExecutePartialLine("\\")
ipi.ExecutePartialLine("1 + \\")
for j in xrange(i):
ipi.ExecutePartialLine("\\")
response = ipi.ExecuteLine("2", True)
Assert("3" in response)
ipi.End()
###########################################################
# if , while, try, for and then EOF.
def test_missing_test():
for x in ['if', 'while', 'for', 'try']:
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine(x, True)
Assert("SyntaxError:" in response)
ipi.End()
##########################################################
# Support multiple-levels of indentation
def test_indentation_levels():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class K:")
ipi.ExecutePartialLine(" def M(self):")
ipi.ExecutePartialLine(" if 1:")
ipi.ExecutePartialLine(" pass")
response = ipi.ExecuteLine("")
ipi.End()
##########################################################
# Support partial lists
def test_partial_lists():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("[1")
ipi.ExecutePartialLine(" ,")
ipi.ExecutePartialLine(" 2")
response = ipi.ExecuteLine("]")
Assert("[1, 2]" in response)
ipi.ExecutePartialLine("[")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
response = ipi.ExecuteLine("]")
Assert("[]" in response)
ipi.End()
def test_partial_lists_cp3530():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
try:
ipi.ExecutePartialLine("[{'a':None},")
response = ipi.ExecuteLine("]")
Assert("[{'a': None}]" in response, response)
ipi.ExecutePartialLine("[{'a'")
response = ipi.ExecutePartialLine(":None},")
response = ipi.ExecuteLine("]")
Assert("[{'a': None}]" in response, response)
ipi.ExecutePartialLine("[{'a':None},")
ipi.ExecutePartialLine("1,")
response = ipi.ExecuteLine("2]")
Assert("[{'a': None}, 1, 2]" in response, response)
finally:
ipi.End()
##########################################################
# Support partial tuples
def test_partial_tuples():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("(2")
ipi.ExecutePartialLine(" ,")
ipi.ExecutePartialLine(" 3")
response = ipi.ExecuteLine(")")
Assert("(2, 3)" in response)
ipi.ExecutePartialLine("(")
response = ipi.ExecuteLine(")")
Assert("()" in response)
ipi.ExecutePartialLine("'abc %s %s %s %s %s' % (")
ipi.ExecutePartialLine(" 'def'")
ipi.ExecutePartialLine(" ,'qrt',")
ipi.ExecutePartialLine(" 'jkl'")
ipi.ExecutePartialLine(",'jkl'")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine(",")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("'123'")
response = ipi.ExecuteLine(")")
Assert("'abc def qrt jkl jkl 123'" in response)
ipi.ExecutePartialLine("a = (")
ipi.ExecutePartialLine(" 1")
ipi.ExecutePartialLine(" , ")
ipi.ExecuteLine(")")
response = ipi.ExecuteLine("a")
Assert("(1,)" in response)
ipi.ExecutePartialLine("(")
ipi.ExecutePartialLine("'joe'")
ipi.ExecutePartialLine(" ")
ipi.ExecutePartialLine(" #")
ipi.ExecutePartialLine(",")
ipi.ExecutePartialLine("2")
response = ipi.ExecuteLine(")")
Assert("('joe', 2)" in response)
ipi.ExecutePartialLine("(")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
response = ipi.ExecuteLine(")")
Assert("()" in response)
ipi.End()
##########################################################
# Support partial dicts
def test_partial_dicts():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("{2:2")
ipi.ExecutePartialLine(" ,")
ipi.ExecutePartialLine(" 2:2")
response = ipi.ExecuteLine("}")
Assert("{2: 2}" in response)
ipi.ExecutePartialLine("{")
response = ipi.ExecuteLine("}")
Assert("{}" in response)
ipi.ExecutePartialLine("a = {")
ipi.ExecutePartialLine(" None:2")
ipi.ExecutePartialLine(" , ")
ipi.ExecuteLine("}")
response = ipi.ExecuteLine("a")
Assert("{None: 2}" in response)
ipi.ExecutePartialLine("{")
ipi.ExecutePartialLine("'joe'")
ipi.ExecutePartialLine(": ")
ipi.ExecutePartialLine(" 42")
ipi.ExecutePartialLine(",")
ipi.ExecutePartialLine("3:45")
response = ipi.ExecuteLine("}")
Assert(repr({'joe':42, 3:45}) in response)
ipi.ExecutePartialLine("{")
ipi.ExecutePartialLine("")
ipi.ExecutePartialLine("")
response = ipi.ExecuteLine("}")
Assert("{}" in response)
ipi.End()
###########################################################
# Some whitespace wackiness
def test_whitespace():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine(" ")
response = ipi.ExecuteLine("")
ipi.End()
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine(" ")
response = ipi.ExecuteLine("2")
Assert("2" in response)
ipi.End()
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecuteLine(" ")
response = ipi.ExecuteLine(" 2", True)
Assert("SyntaxError:" in response)
ipi.End()
###########################################################
# test the indentation error in the interactive mode
def test_indentation_interactive():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class C:pass")
response = ipi.ExecuteLine("")
AreEqual(response, "")
ipi.ExecutePartialLine("class D(C):")
response = ipi.ExecuteLine("", True)
Assert("IndentationError:" in response)
ipi.End()
###########################################################
# test /mta w/ no other args
def test_mta():
ipi = IronPythonInstance(executable, exec_prefix, '-X:MTA')
AreEqual(ipi.Start(), True)
ipi.ExecutePartialLine("class C:pass")
response = ipi.ExecuteLine("")
AreEqual(response, "")
ipi.ExecutePartialLine("class D(C):")
response = ipi.ExecuteLine("", True)
Assert("IndentationError:" in response)
ipi.End()
###########################################################
# test for comments in interactive input
def test_comments():
ipi = IronPythonInstance(executable, exec_prefix, extraArgs)
AreEqual(ipi.Start(), True)
response = ipi.ExecuteLine("# this is some comment line")
AreEqual(response, "")
response = ipi.ExecuteLine(" # this is some comment line")
AreEqual(response, "")
response = ipi.ExecuteLine("# this is some more comment line")
AreEqual(response, "")
ipi.ExecutePartialLine("if 100:")
ipi.ExecutePartialLine(" print 100")
ipi.ExecutePartialLine("# this is some more | |
# xline/elements.py
import numpy as np
from .base_classes import Element
from .be_beamfields.beambeam import BeamBeam4D
from .be_beamfields.beambeam import BeamBeam6D
from .be_beamfields.spacecharge import SCCoasting
from .be_beamfields.spacecharge import SCQGaussProfile
from .be_beamfields.spacecharge import SCInterpolatedProfile
_factorial = np.array(
[
1,
1,
2,
6,
24,
120,
720,
5040,
40320,
362880,
3628800,
39916800,
479001600,
6227020800,
87178291200,
1307674368000,
20922789888000,
355687428096000,
6402373705728000,
121645100408832000,
2432902008176640000,
]
)
class Drift(Element):
"""Drift in expanded form"""
_description = [("length", "m", "Length of the drift", 0)]
def track(self, p):
length = self.length
rpp = p.rpp
xp = p.px * rpp
yp = p.py * rpp
p.x += xp * length
p.y += yp * length
p.zeta += length * (p.rvv - (1 + (xp ** 2 + yp ** 2) / 2))
p.s += length
class DriftExact(Drift):
"""Drift in exact form"""
_description = [("length", "m", "Length of the drift", 0)]
def track(self, p):
sqrt = p._m.sqrt
length = self.length
opd = 1 + p.delta
lpzi = length / sqrt(opd ** 2 - p.px ** 2 - p.py ** 2)
p.x += p.px * lpzi
p.y += p.py * lpzi
p.zeta += p.rvv * length - opd * lpzi
p.s += length
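# Hedged sketch (not part of the original module): for small transverse momenta
# the expanded Drift and DriftExact give nearly the same transverse kick, since
# the exact path-length factor length/sqrt((1+delta)**2 - px**2 - py**2) reduces
# to length/(1+delta). The numbers below are arbitrary test values.
def _demo_drift_small_angle(px=1e-3, py=-2e-3, delta=1e-3, length=1.0):
    rpp = 1.0 / (1.0 + delta)
    dx_expanded = px * rpp * length
    lpzi = length / np.sqrt((1.0 + delta) ** 2 - px ** 2 - py ** 2)
    dx_exact = px * lpzi
    assert abs(dx_expanded - dx_exact) < 1e-8
    return dx_expanded, dx_exact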
def _arrayofsize(ar, size):
ar = np.array(ar)
if len(ar) == 0:
return np.zeros(size, dtype=ar.dtype)
elif len(ar) < size:
ar = np.hstack([ar, np.zeros(size - len(ar), dtype=ar.dtype)])
return ar
class Multipole(Element):
""" Multipole """
_description = [
(
"knl",
"m^-n",
"Normalized integrated strength of normal components",
lambda: [0],
),
(
"ksl",
"m^-n",
"Normalized integrated strength of skew components",
lambda: [0],
),
(
"hxl",
"rad",
"Rotation angle of the reference trajectory"
"in the horizzontal plane",
0,
),
(
"hyl",
"rad",
"Rotation angle of the reference trajectory in the vertical plane",
0,
),
("length", "m", "Length of the originating thick multipole", 0),
]
@property
def order(self):
return max(len(self.knl), len(self.ksl)) - 1
def track(self, p):
order = self.order
length = self.length
knl = _arrayofsize(self.knl, order + 1)
ksl = _arrayofsize(self.ksl, order + 1)
x = p.x
y = p.y
chi = p.chi
dpx = knl[order]
dpy = ksl[order]
for ii in range(order, 0, -1):
zre = (dpx * x - dpy * y) / ii
zim = (dpx * y + dpy * x) / ii
dpx = knl[ii - 1] + zre
dpy = ksl[ii - 1] + zim
dpx = -chi * dpx
dpy = chi * dpy
# curvature effect kick
hxl = self.hxl
hyl = self.hyl
delta = p.delta
if hxl != 0 or hyl != 0:
b1l = chi * knl[0]
a1l = chi * ksl[0]
hxlx = hxl * x
hyly = hyl * y
if length > 0:
hxx = hxlx / length
hyy = hyly / length
else: # non physical weak focusing disabled (SixTrack mode)
hxx = 0
hyy = 0
dpx += hxl + hxl * delta - b1l * hxx
dpy -= hyl + hyl * delta - a1l * hyy
p.zeta -= chi * (hxlx - hyly)
p.px += dpx
p.py += dpy
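# Hedged sketch (not part of the original module): the loop in Multipole.track
# is a Horner-style evaluation of the truncated multipole series, i.e. before
# the chi sign factors it accumulates
#   dpx + 1j*dpy = sum_n (knl[n] + 1j*ksl[n]) * (x + 1j*y)**n / n!
# The strengths and coordinates below are invented for illustration only.
def _demo_multipole_kick(knl=(0.1, -0.2, 0.05), ksl=(0.0, 0.3, -0.1), x=1e-3, y=-2e-3):
    order = len(knl) - 1
    dpx, dpy = knl[order], ksl[order]
    for ii in range(order, 0, -1):
        zre = (dpx * x - dpy * y) / ii
        zim = (dpx * y + dpy * x) / ii
        dpx = knl[ii - 1] + zre
        dpy = ksl[ii - 1] + zim
    z = x + 1j * y
    direct = sum((knl[n] + 1j * ksl[n]) * z ** n / _factorial[n]
                 for n in range(order + 1))
    assert np.isclose(dpx, direct.real) and np.isclose(dpy, direct.imag)
    return dpx, dpy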
class RFMultipole(Element):
"""
H= -l sum Re[ (kn[n](zeta) + i ks[n](zeta) ) (x+iy)**(n+1)/ n ]
kn[n](z) = k_n cos(2pi w tau + pn/180*pi)
    ks[n](z) = k_n cos(2pi w tau + ps/180*pi)
"""
_description = [
("voltage", "volt", "Voltage", 0),
("frequency", "hertz", "Frequency", 0),
("lag", "degree", "Delay in the cavity sin(lag - w tau)", 0),
("knl", "", "...", lambda: [0]),
("ksl", "", "...", lambda: [0]),
("pn", "", "...", lambda: [0]),
("ps", "", "...", lambda: [0]),
]
@property
def order(self):
return max(len(self.knl), len(self.ksl)) - 1
def track(self, p):
sin = p._m.sin
cos = p._m.cos
pi = p._m.pi
order = self.order
k = 2 * pi * self.frequency / p.clight
tau = p.zeta / p.rvv / p.beta0
ktau = k * tau
deg2rad = pi / 180
knl = _arrayofsize(self.knl, order + 1)
ksl = _arrayofsize(self.ksl, order + 1)
pn = _arrayofsize(self.pn, order + 1) * deg2rad
ps = _arrayofsize(self.ps, order + 1) * deg2rad
x = p.x
y = p.y
dpx = 0
dpy = 0
dptr = 0
zre = 1
zim = 0
for ii in range(order + 1):
pn_ii = pn[ii] - ktau
ps_ii = ps[ii] - ktau
cn = cos(pn_ii)
sn = sin(pn_ii)
cs = cos(ps_ii)
ss = sin(ps_ii)
# transverse kick order i!
dpx += cn * knl[ii] * zre - cs * ksl[ii] * zim
dpy += cs * ksl[ii] * zre + cn * knl[ii] * zim
# compute z**(i+1)/(i+1)!
zret = (zre * x - zim * y) / (ii + 1)
zim = (zim * x + zre * y) / (ii + 1)
zre = zret
fnr = knl[ii] * zre
# fni = knl[ii] * zim
# fsr = ksl[ii] * zre
fsi = ksl[ii] * zim
# energy kick order i+1
dptr += sn * fnr - ss * fsi
chi = p.chi
p.px += -chi * dpx
p.py += chi * dpy
dv0 = self.voltage * sin(self.lag * deg2rad - ktau)
p.add_to_energy(p.charge_ratio * p.q0 * (dv0 - p.p0c * k * dptr))
class Cavity(Element):
"""Radio-frequency cavity"""
_description = [
("voltage", "V", "Integrated energy change", 0),
("frequency", "Hz", "Frequency of the cavity", 0),
("lag", "degree", "Delay in the cavity sin(lag - w tau)", 0),
]
def track(self, p):
sin = p._m.sin
pi = p._m.pi
k = 2 * pi * self.frequency / p.clight
tau = p.zeta / p.rvv / p.beta0
phase = self.lag * pi / 180 - k * tau
p.add_to_energy(p.charge_ratio * p.q0 * self.voltage * sin(phase))
class SawtoothCavity(Element):
"""Radio-frequency cavity"""
_description = [
("voltage", "V", "Integrated energy change", 0),
("frequency", "Hz", "Equivalent Frequency of the cavity", 0),
("lag", "degree", "Delay in the cavity `lag - w tau`", 0),
]
def track(self, p):
pi = p._m.pi
k = 2 * pi * self.frequency / p.clight
tau = p.zeta / p.rvv / p.beta0
phase = self.lag * pi / 180 - k * tau
phase = (phase + pi) % (2 * pi) - pi
p.add_to_energy(p.charge_ratio * p.q0 * self.voltage * phase)
class XYShift(Element):
"""shift of the reference"""
_description = [
("dx", "m", "Horizontal shift", 0),
("dy", "m", "Vertical shift", 0),
]
def track(self, p):
p.x -= self.dx
p.y -= self.dy
class Elens(Element):
"""Hollow Electron Lens"""
_description = [("voltage", "V", "Voltage of the electron lens", 0),
("current", "A", "Current of the e-beam", 0),
("inner_radius", "m", "Inner radius of the hollow e-beam", 0),
("outer_radius", "m", "Outer radius of the hollow e-beam", 0),
("ebeam_center_x", "m", "Center of the e-beam in x", 0),
("ebeam_center_y", "m", "Center of the e-beam in y", 0),
("elens_length", "m", "Length of the hollow electron lens", 0)
]
def track(self, p):
# vacuum permittivity
epsilon0 = p.epsilon0
pi = p._m.pi # pi
clight = p.clight # speed of light
e_mass = p.emass # electron mass
# get the transverse amplitude
        # TO DO: needs to be modified for off-centered e-beam
r = np.sqrt(p.x**2 + p.y**2)
# magnetic rigidity
if type(p.pc) is float:
Brho = p.pc/(p.q0*p.clight)
else:
Brho = p.pc[0]/(p.q0*p.clight)
# Electron properties
Ekin_e = self.voltage # kinetic energy
Etot_e = Ekin_e + e_mass # total energy
p_e = np.sqrt(Etot_e**2 - e_mass**2) # electron momentum
beta_e = p_e/Etot_e # relativ. beta
# relativistic beta of protons
beta_p = p.rvv*p.beta0
# abbreviate for better readability
r1 = self.inner_radius
r2 = self.outer_radius
I = self.current
# geometric factor frr
frr = ((r**2 - r1**2)/(r2**2 - r1**2)) # uniform distribution
try:
frr = [max(0,iitem) for iitem in frr]
frr = [min(1,iitem) for iitem in frr]
frr = np.array(frr, dtype = float)
except TypeError:
frr = max(0,frr)
frr = min(1,frr)
frr = np.array([frr], dtype=float)
#
#
# if len(frr)>0:
# frr[frr<0] = 0
# frr[frr>1] = 1
# calculate the kick at r2 (maximum kick)
theta_max = ((1/(4*pi*epsilon0))*(2*self.elens_length*I)*
(1+beta_e*beta_p)*(1/(r2*Brho*beta_e*beta_p*clight**2)))
# calculate the kick of the particles
# the (-1) stems from the attractive force of the E-field
theta = (-1)*theta_max*r2*p.rpp*p.chi
print("type frr", type(frr))
print("type r", type(r))
theta = theta*np.divide(frr, r, out=np.zeros_like(frr), where=r!=0)
        # convert px and py
# duplico/qc15_statemaker
"""The game state object implementations for QC15's Statemaker tool.
"""
from __future__ import print_function
import sys
import csv
import textwrap
import struct
import networkx as nx
from chardet.universaldetector import UniversalDetector
from qc15_game import *
__author__ = "<NAME> @duplico"
__copyright__ = "(c) 2018, <NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
all_actions = []
main_actions = []
aux_actions = []
all_states = []
main_text = [] # Lives in FRAM
aux_text = [] # Lives in flash
def text_addr(text):
if text in main_text:
return main_text.index(text)
else:
return len(main_text) + aux_text.index(text)
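# Hedged sketch (not part of the original tool): FRAM strings (main_text) get
# the low addresses and flash strings (aux_text) are addressed after all of
# main_text. The sample strings are invented; the helper temporarily extends
# the module-level lists and restores them afterwards.
def _demo_text_addr():
    main_text.extend(['HELLO', 'WORLD'])
    aux_text.append('RARELY USED')
    try:
        # 'WORLD' resolves to its FRAM index; the flash string is offset past FRAM
        assert text_addr('RARELY USED') == len(main_text)
        return text_addr('WORLD'), text_addr('RARELY USED')
    finally:
        del main_text[-2:]
        aux_text.pop()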
state_name_ids = dict()
closable_states = set()
max_inputs = 0
max_timers = 0
max_others = 0
all_other_input_descs = [
'BADGESNEARBY0',
'BADGESNEARBYSOME',
'NAME_NOT_FOUND',
'NAME_FOUND',
'CONNECT_SUCCESS_NEW',
'CONNECT_SUCCESS_OLD',
'CONNECT_FAILURE'
]
all_other_output_descs = [
'CUSTOMSTATEUSERNAME', # User name entry
'NAMESEARCH',
'SET_CONNECTABLE',
'CONNECT',
'STATUS_MENU',
]
all_animations = [
'flag_rainbow',
'flag_bi',
'flag_pan',
'flag_trans',
'flag_ace',
'flag_ally',
'flag_leather',
'flag_bear',
'flag_blue',
'flag_lblue',
'flag_green',
'flag_red',
'flag_yellow',
'flag_pink',
'flag_white',
'flag_newbie',
'flag_original',
'flag_regular',
'flag_freezer',
'flag_techsupport',
'animFirstLights',
'whiteDiscovery',
'animSpinBlue',
'animSpinOrange',
'animSpinYellow',
'animSpinGreen',
'animSpinRed',
'animSpinWhite',
'animSpinPink',
'animSolidBlue',
'animSolidGreen',
'animSolidYellow',
'animSolidOrange',
'animSolidRed',
'animSolidWhite'
]
row_number = 0
row_lines = []
statefile = ''
class GameTimer(object):
def __init__(self, duration, recurring, result):
self.duration = duration
self.recurring = recurring
self.result = result
def sort_key(self):
# We want all one-time timers to go before recurring,
# and within those two we need them sorted by duration, longest first.
# So, because a simple sort() is ascending, we want:
# high duration -> low sort_key
key = -self.duration
# And one-time -> low sort_key
        if not self.recurring:
            # Remember, key is negative.
            key = key * 1000000
        return key
def pack(self):
"""
typedef struct {
/// The duration of this timer, in 1/32 of seconds.
uint32_t duration;
/// True if this timer should repeat.
uint8_t recurring;
uint16_t result_action_id;
} game_timer_t;
"""
return struct.pack(
'<LBxH',
*self.as_int_sequence()
)
def as_int_sequence(self):
return (
int(self.duration * 32), # Convert from seconds to qc clock ticks
self.recurring,
self.result.id()
)
def as_struct_text(self):
struct_text = "(game_timer_t){.duration=%d, .recurring=%d, " \
".result_action_id=%d}" % self.as_int_sequence()
return struct_text
def __str__(self):
return '%d %s' % (self.duration, 'R' if self.recurring else 'O')
def __repr__(self):
return str(self)
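# Hedged usage sketch (not in the original file): with sort_key, one-shot
# timers sort before recurring ones, and within each group longer durations
# come first. The durations are invented and the result field is left as None
# because sort_key and __str__ never touch it.
def _demo_timer_ordering():
    timers = [GameTimer(5, True, None), GameTimer(30, False, None),
              GameTimer(10, True, None), GameTimer(5, False, None)]
    timers.sort(key=GameTimer.sort_key)
    return [str(t) for t in timers]   # ['30 O', '5 O', '10 R', '5 R']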
class GameInput(object):
def __init__(self, text, result):
if len(text) > 23:
error(statefile, "Input text too long.", badtext=text)
if '$' in text:
error(statefile, "Variables not allowed in inputs, treating as literal.",
errtype="WARNING")
if text not in main_text:
main_text.append(text)
if text in aux_text:
aux_text.remove(text)
self.result = result
self.text = text
def pack(self):
"""
typedef struct {
uint16_t text_addr;
uint16_t result_action_id;
} game_user_in_t;
"""
return struct.pack(
'<HH',
*self.as_int_sequence()
)
def as_int_sequence(self):
return (
text_addr(self.text),
self.result.id()
)
def as_struct_text(self):
struct_text = "(game_user_in_t){.text_addr=%d, .result_action_id=%d}" %\
self.as_int_sequence()
return struct_text
class GameOther(object):
def __init__(self, desc, result):
self.result = result
self.desc = desc.upper()
if self.desc in all_other_input_descs:
self.id = all_other_input_descs.index(self.desc)
else:
self.id = len(all_other_input_descs)
all_other_input_descs.append(self.desc)
def pack(self):
"""
typedef struct {
uint16_t type_id;
uint16_t result_action_id;
} game_other_in_t;
"""
return struct.pack(
'<HH',
*self.as_int_sequence()
)
def as_int_sequence(self):
return (
all_other_input_descs.index(self.desc),
self.result.id()
)
def as_struct_text(self):
struct_text = "(game_other_in_t){.type_id=%d, .result_action_id=%d}" %\
self.as_int_sequence()
return struct_text
class GameAction(object):
max_extra_details = 0
def __init__(self, input_tuple, state_name, prev_action, prev_choice,
action_type=None, detail=None,
duration=0, choice_share=1, row=None, aux=False):
if row:
action_type = row['Result_type']
detail = row['Result_detail']
# TODO: default duration per result type
duration = float(row['Result_duration']) if row['Result_duration'] else 0.0
choice_share = int(row['Choice_share']) if row['Choice_share'] else 1
all_actions.append(self)
if (aux):
aux_actions.append(self)
else:
main_actions.append(self)
self.action_type = action_type
self.state_name = state_name
self.detail = detail
self.duration = duration
self.choice_share = choice_share
self.choice_total = 0
self.next_action = None
self.next_choice = None
self.prev_action = prev_action
self.prev_choice = prev_choice
self.input_tuple = input_tuple
# Now, handle the specific disposition of our details based upon
# which action type we are:
# If we're text, we need to load the text into the master text list:
if self.action_type.startswith("TEXT"):
self.detail = self.detail.replace('`', '\x96')
if aux and self.detail not in aux_text and self.detail not in main_text:
aux_text.append(self.detail)
elif self.detail not in main_text:
main_text.append(self.detail)
if self.action_type == 'OTHER':
self.detail = self.detail.upper().replace(' ', '_')
self.detail = self.detail.replace('.', '')
if self.detail not in all_other_output_descs:
all_other_output_descs.append(self.detail)
if self.action_type in ('PREVIOUS', 'PUSH', 'POP'):
# Detail and duration are ignored.
self.detail = ''
self.duration = 0
if self.action_type == 'CLOSE':
# TODO
pass
if self.action_type.startswith("SET_ANIM"):
global all_animations
if self.detail.upper().strip() == 'NONE':
self.detail = None
elif self.detail not in all_animations:
all_animations.append(self.detail)
if self.action_type == 'STATE_TRANSITION':
self.detail = self.detail.upper()
if self.detail not in state_name_ids:
# ERROR! Unless we're allowing implicit state declaration.
if GameState.allow_implicit:
# Create a new game state for this transition.
error(statefile, "Implicitly creating undefined state '%s'" % self.detail, badtext=self.detail, errtype='WARNING')
new_state = GameState(self.detail)
# The state will display its name (truncated to 24 chars),
# then return to the current state (the one that called it)
# TODO: use pop instead
new_state_first_action = GameAction(
('ENTER', ''),
self.detail[:24],
None,
None,
action_type='TEXT',
detail=self.detail[:24].upper(),
duration=5,
)
new_state.insert_event(('ENTER', ''), new_state_first_action)
GameAction(
('ENTER', ''),
self.detail[:24],
new_state_first_action,
None,
action_type='STATE_TRANSITION',
detail=self.state_name,
)
else:
# ERROR.
error(statefile, "Transition to undefined state '%s'" % self.detail, badtext=self.detail)
self.detail = all_states[state_name_ids[self.detail]]
# Finally, handle wiring up our linked-list structure:
# If we're a member of a choice set, we need to link the existing
# last element to ourself, and then we should increment every other
# member's choice_total so they're all the same.
self.choice_total = self.choice_share
if self.prev_choice:
# Wire our previous choice up to us.
self.prev_choice.next_choice = self
# Now compute the choice total so far, also incrementing every
# previous choice's total by our choice share. (which will make
# them all equal, I hope I hope I hope)
previous_choice = self.prev_choice
while previous_choice:
self.choice_total += previous_choice.choice_share
previous_choice.choice_total += self.choice_share
previous_choice = previous_choice.prev_choice
# If we're in an action sequence, we need to tell the existing last
# element of that sequence that we come next.
if self.prev_action:
self.prev_action.next_action = self
def id(self):
return all_actions.index(self)
def get_previous_action(self):
if self.prev_action:
return self.prev_action
last_choice = self.prev_choice
if not last_choice:
# There are neither choices before us in the chain, nor an explicit
# previous action. That means that we are the first in the chain,
# so return None.
return None
# Find the first node in the choice set linked-list:
while last_choice.prev_choice:
assert last_choice.choice_total == last_choice.prev_choice.choice_total
last_choice = last_choice.prev_choice
# Now last_choice holds the first node in the choice set. Return its
# previous action:
return last_choice.prev_action
@staticmethod
def create_from_row(input_tuple, state, prev_action, prev_choice, row):
if row['Result_type'] != 'TEXT':
action = GameAction(input_tuple, state.name, prev_action,
prev_choice, row=row)
if input_tuple not in state.events:
state.insert_event(input_tuple, action)
return action
# If we've gotten to this point, that means ... drumroll...
# We're dealing with a TEXT row!
duration = float(row['Result_duration']) if row['Result_duration'] else None
choice_share = int(row['Choice_share']) if row['Choice_share'] else 1
# This means there's a couple of extra things we need to do.
# We definitely need to generate the first action series.
first_action, last_action = GameAction.create_text_action_seq(
input_tuple,
state.name,
prev_action,
prev_choice,
row['Result_detail'],
duration,
choice_share,
aux=False
)
# This gave us two actions, which may be the same as each other.
# See if this needs to be attached directly to an event:
        if input_tuple not in state.events:
            # If so, we need to add the very first event in this chain.
            state.insert_event(input_tuple, first_action)
if (0 not in row) or (not row[0]):
# If this is the only column, we're done. Time to return
# last_action, which is what additional actions will need to
# link to.
return last_action
# If we're here it means that we have multiple text choices to deal
# with. We've saved the results of our first sequence creation call.
# Now we need to link some new choices to it.
# We save first_action, because we need to link additional action
# choices to it. We save last_action, because this function needs to
        # return it.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Script for updating SPIR-V dialect by scraping information from SPIR-V
# HTML and JSON specs from the Internet.
#
# For example, to define the enum attribute for SPIR-V memory model:
#
# ./gen_spirv_dialect.py --base_td_path /path/to/SPIRVBase.td \
# --new-enum MemoryModel
#
# The 'operand_kinds' dict of spirv.core.grammar.json contains all supported
# SPIR-V enum classes.
import itertools
import re
import requests
import textwrap
import yaml
SPIRV_HTML_SPEC_URL = 'https://www.khronos.org/registry/spir-v/specs/unified1/SPIRV.html'
SPIRV_JSON_SPEC_URL = 'https://raw.githubusercontent.com/KhronosGroup/SPIRV-Headers/master/include/spirv/unified1/spirv.core.grammar.json'
SPIRV_OCL_EXT_HTML_SPEC_URL = 'https://www.khronos.org/registry/SPIR-V/specs/unified1/OpenCL.ExtendedInstructionSet.100.html'
SPIRV_OCL_EXT_JSON_SPEC_URL = 'https://raw.githubusercontent.com/KhronosGroup/SPIRV-Headers/master/include/spirv/unified1/extinst.opencl.std.100.grammar.json'
AUTOGEN_OP_DEF_SEPARATOR = '\n// -----\n\n'
AUTOGEN_ENUM_SECTION_MARKER = 'enum section. Generated from SPIR-V spec; DO NOT MODIFY!'
AUTOGEN_OPCODE_SECTION_MARKER = (
'opcode section. Generated from SPIR-V spec; DO NOT MODIFY!')
def get_spirv_doc_from_html_spec(url, settings):
"""Extracts instruction documentation from SPIR-V HTML spec.
Returns:
- A dict mapping from instruction opcode to documentation.
"""
if url is None:
url = SPIRV_HTML_SPEC_URL
response = requests.get(url)
spec = response.content
from bs4 import BeautifulSoup
spirv = BeautifulSoup(spec, 'html.parser')
doc = {}
if settings.gen_ocl_ops:
section_anchor = spirv.find('h2', {'id': '_binary_form'})
for section in section_anchor.parent.find_all('div', {'class': 'sect2'}):
for table in section.find_all('table'):
inst_html = table.tbody.tr.td
opname = inst_html.a['id']
# Ignore the first line, which is just the opname.
doc[opname] = inst_html.text.split('\n', 1)[1].strip()
else:
section_anchor = spirv.find('h3', {'id': '_a_id_instructions_a_instructions'})
for section in section_anchor.parent.find_all('div', {'class': 'sect3'}):
for table in section.find_all('table'):
inst_html = table.tbody.tr.td.p
opname = inst_html.a['id']
# Ignore the first line, which is just the opname.
doc[opname] = inst_html.text.split('\n', 1)[1].strip()
return doc
def get_spirv_grammar_from_json_spec(url):
"""Extracts operand kind and instruction grammar from SPIR-V JSON spec.
Returns:
- A list containing all operand kinds' grammar
- A list containing all instructions' grammar
"""
response = requests.get(SPIRV_JSON_SPEC_URL)
spec = response.content
import json
spirv = json.loads(spec)
if url is None:
return spirv['operand_kinds'], spirv['instructions']
response_ext = requests.get(url)
spec_ext = response_ext.content
spirv_ext = json.loads(spec_ext)
return spirv['operand_kinds'], spirv_ext['instructions']
def split_list_into_sublists(items):
"""Split the list of items into multiple sublists.
This is to make sure the string composed from each sublist won't exceed
80 characters.
Arguments:
- items: a list of strings
"""
chuncks = []
chunk = []
chunk_len = 0
for item in items:
chunk_len += len(item) + 2
if chunk_len > 80:
chuncks.append(chunk)
chunk = []
chunk_len = len(item) + 2
chunk.append(item)
if len(chunk) != 0:
chuncks.append(chunk)
return chuncks
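# Hedged usage sketch (not in the original script): splits a long list of
# symbol names so that each chunk joined with ', ' stays within roughly 80
# characters. The names below are invented.
def _demo_split_list_into_sublists():
    names = ['Cap{}'.format(i) for i in range(40)]
    chunks = split_list_into_sublists(names)
    assert all(len(', '.join(chunk)) <= 80 for chunk in chunks)
    return chunks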
def uniquify_enum_cases(lst):
"""Prunes duplicate enum cases from the list.
Arguments:
- lst: List whose elements are to be uniqued. Assumes each element is a
(symbol, value) pair and elements already sorted according to value.
Returns:
- A list with all duplicates removed. The elements are sorted according to
value and, for each value, uniqued according to symbol.
- A map from deduplicated cases to the uniqued case.
"""
cases = lst
uniqued_cases = []
duplicated_cases = {}
# First sort according to the value
cases.sort(key=lambda x: x[1])
# Then group them according to the value
for _, groups in itertools.groupby(cases, key=lambda x: x[1]):
# For each value, sort according to the enumerant symbol.
sorted_group = sorted(groups, key=lambda x: x[0])
# Keep the "smallest" case, which is typically the symbol without extension
# suffix. But we have special cases that we want to fix.
case = sorted_group[0]
for i in range(1, len(sorted_group)):
duplicated_cases[sorted_group[i][0]] = case[0]
if case[0] == 'HlslSemanticGOOGLE':
assert len(sorted_group) == 2, 'unexpected new variant for HlslSemantic'
case = sorted_group[1]
duplicated_cases[sorted_group[0][0]] = case[0]
uniqued_cases.append(case)
return uniqued_cases, duplicated_cases
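# Hedged sketch (not in the original script): enumerants sharing a value
# collapse to the lexicographically smallest symbol, and the returned mapping
# records which aliases were folded. The cases below are invented.
def _demo_uniquify_enum_cases():
    cases = [('ViewportIndex', 10), ('ViewportIndexKHR', 10), ('Position', 0)]
    uniqued, duplicates = uniquify_enum_cases(cases)
    # uniqued    -> [('Position', 0), ('ViewportIndex', 10)]
    # duplicates -> {'ViewportIndexKHR': 'ViewportIndex'}
    return uniqued, duplicates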
def toposort(dag, sort_fn):
"""Topologically sorts the given dag.
Arguments:
- dag: a dict mapping from a node to its incoming nodes.
- sort_fn: a function for sorting nodes in the same batch.
Returns:
A list containing topologically sorted nodes.
"""
# Returns the next batch of nodes without incoming edges
def get_next_batch(dag):
while True:
no_prev_nodes = set(node for node, prev in dag.items() if not prev)
if not no_prev_nodes:
break
yield sorted(no_prev_nodes, key=sort_fn)
dag = {
node: (prev - no_prev_nodes)
for node, prev in dag.items()
if node not in no_prev_nodes
}
assert not dag, 'found cyclic dependency'
sorted_nodes = []
for batch in get_next_batch(dag):
sorted_nodes.extend(batch)
return sorted_nodes
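# Hedged sketch (not in the original script): a tiny DAG in which 'Shader'
# requires 'Matrix' and 'Geometry' requires 'Shader'; nodes with no remaining
# prerequisites are emitted batch by batch, each batch ordered by sort_fn.
def _demo_toposort():
    dag = {'Matrix': set(), 'Shader': {'Matrix'}, 'Geometry': {'Shader'}}
    order = toposort(dag, sort_fn=lambda name: name)
    # order -> ['Matrix', 'Shader', 'Geometry']
    return order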
def toposort_capabilities(all_cases, capability_mapping):
"""Returns topologically sorted capability (symbol, value) pairs.
Arguments:
- all_cases: all capability cases (containing symbol, value, and implied
capabilities).
- capability_mapping: mapping from duplicated capability symbols to the
canonicalized symbol chosen for SPIRVBase.td.
Returns:
A list containing topologically sorted capability (symbol, value) pairs.
"""
dag = {}
name_to_value = {}
for case in all_cases:
# Get the current capability.
cur = case['enumerant']
name_to_value[cur] = case['value']
# Ignore duplicated symbols.
if cur in capability_mapping:
continue
# Get capabilities implied by the current capability.
prev = case.get('capabilities', [])
uniqued_prev = set([capability_mapping.get(c, c) for c in prev])
dag[cur] = uniqued_prev
sorted_caps = toposort(dag, lambda x: name_to_value[x])
# Attach the capability's value as the second component of the pair.
return [(c, name_to_value[c]) for c in sorted_caps]
def get_capability_mapping(operand_kinds):
"""Returns the capability mapping from duplicated cases to canonicalized ones.
Arguments:
- operand_kinds: all operand kinds' grammar spec
Returns:
- A map mapping from duplicated capability symbols to the canonicalized
symbol chosen for SPIRVBase.td.
"""
# Find the operand kind for capability
cap_kind = {}
for kind in operand_kinds:
if kind['kind'] == 'Capability':
cap_kind = kind
kind_cases = [
(case['enumerant'], case['value']) for case in cap_kind['enumerants']
]
_, capability_mapping = uniquify_enum_cases(kind_cases)
return capability_mapping
def get_availability_spec(enum_case, capability_mapping, for_op, for_cap):
"""Returns the availability specification string for the given enum case.
Arguments:
- enum_case: the enum case to generate availability spec for. It may contain
'version', 'lastVersion', 'extensions', or 'capabilities'.
- capability_mapping: mapping from duplicated capability symbols to the
canonicalized symbol chosen for SPIRVBase.td.
- for_op: bool value indicating whether this is the availability spec for an
op itself.
- for_cap: bool value indicating whether this is the availability spec for
capabilities themselves.
Returns:
- A `let availability = [...];` string if with availability spec or
empty string if without availability spec
"""
assert not (for_op and for_cap), 'cannot set both for_op and for_cap'
DEFAULT_MIN_VERSION = 'MinVersion<SPV_V_1_0>'
DEFAULT_MAX_VERSION = 'MaxVersion<SPV_V_1_5>'
DEFAULT_CAP = 'Capability<[]>'
DEFAULT_EXT = 'Extension<[]>'
min_version = enum_case.get('version', '')
if min_version == 'None':
min_version = ''
elif min_version:
min_version = 'MinVersion<SPV_V_{}>'.format(min_version.replace('.', '_'))
# TODO: delete this once ODS can support dialect-specific content
# and we can use omission to mean no requirements.
if for_op and not min_version:
min_version = DEFAULT_MIN_VERSION
max_version = enum_case.get('lastVersion', '')
if max_version:
max_version = 'MaxVersion<SPV_V_{}>'.format(max_version.replace('.', '_'))
# TODO: delete this once ODS can support dialect-specific content
# and we can use omission to mean no requirements.
if for_op and not max_version:
max_version = DEFAULT_MAX_VERSION
exts = enum_case.get('extensions', [])
if exts:
exts = 'Extension<[{}]>'.format(', '.join(sorted(set(exts))))
# We need to strip the minimal version requirement if this symbol is
# available via an extension, which means *any* SPIR-V version can support
# it as long as the extension is provided. The grammar's 'version' field
# under such case should be interpreted as this symbol is introduced as
# a core symbol since the given version, rather than a minimal version
# requirement.
min_version = DEFAULT_MIN_VERSION if for_op else ''
# TODO: delete this once ODS can support dialect-specific content
# and we can use omission to mean no requirements.
if for_op and not exts:
exts = DEFAULT_EXT
caps = enum_case.get('capabilities', [])
implies = ''
if caps:
canonicalized_caps = []
for c in caps:
if c in capability_mapping:
canonicalized_caps.append(capability_mapping[c])
else:
canonicalized_caps.append(c)
prefixed_caps = [
'SPV_C_{}'.format(c) for c in sorted(set(canonicalized_caps))
]
if for_cap:
# If this is generating the availability for capabilities, we need to
# put the capability "requirements" in implies field because now
# the "capabilities" field in the source grammar means so.
caps = ''
implies = 'list<I32EnumAttrCase> implies = [{}];'.format(
', '.join(prefixed_caps))
else:
caps = 'Capability<[{}]>'.format(', '.join(prefixed_caps))
implies = ''
# TODO: delete this once ODS can support dialect-specific content
# and we can use omission to mean no requirements.
if for_op and not caps:
caps = DEFAULT_CAP
avail = ''
# Compose availability spec if any of the requirements is not empty.
0, 1)
# Integrate over energy
with np.errstate(divide='ignore', invalid='ignore'):
y = self._U**(3/2) / (1 - self._U)**(7/2)
y = np.where(np.isfinite(y), y, 0)
coeff = 1e6 * (2/self.mass)**(3/2) / (N * kB / K2eV) * (self.E0*eV2J)**(5/2)
Vij = np.array([[V[0]*V[0], V[0]*V[1], V[0]*V[2]],
[V[1]*V[0], V[1]*V[1], V[1]*V[2]],
[V[2]*V[0], V[2]*V[1], V[2]*V[2]]])
T = (coeff * np.trapz(y[:, np.newaxis, np.newaxis] * T, self._U, axis=0)
- (1e6 * self.mass / kB * K2eV * Vij)
)
return T
def velocity(self, N=None):
self.precondition()
if N is None:
N = self.density()
# Integrate over phi
vx = np.trapz(np.cos(self._phi)[:, np.newaxis, np.newaxis] * self._f,
self._phi, axis=0)
vy = np.trapz(np.sin(self._phi)[:, np.newaxis, np.newaxis] * self._f,
self._phi, axis=0)
vz = np.trapz(self._f, self._phi, axis=0)
# Integrate over theta
vx = np.trapz(np.sin(self._theta)[:, np.newaxis]**2 * vx, self._theta, axis=0)
vy = np.trapz(np.sin(self._theta)[:, np.newaxis]**2 * vy, self._theta, axis=0)
vz = np.trapz(np.cos(self._theta)[:, np.newaxis]
* np.sin(self._theta)[:, np.newaxis]
* vz,
self._theta, axis=0)
V = np.array([vx, vy, vz]).T
# Integrate over Energy
with np.errstate(divide='ignore', invalid='ignore'):
y = self._U / (1 - self._U)**3
y = np.where(np.isfinite(y), y, 0)
coeff = -1e3 * 2 * (eV2J * self.E0 / self.mass)**2 / N
V = coeff * np.trapz(y[:, np.newaxis] * V, self._U, axis=0)
return V
def vspace_entropy(self, N=None, s=None):
self.precondition()
if N is None:
N = self.density()
if s is None:
s = self.entropy()
# Assume that the azimuth and polar angle bins are equal size
dtheta = np.diff(self._theta).mean()
dphi = np.diff(self._phi).mean()
# Calculate the factors that associated with the normalized
# volume element
# - U ranges from [0, inf] and np.inf/np.inf = nan
# - Set the last element of y along U manually to 0
# - log(0) = -inf; Zeros come from theta and y. Reset to zero
# - Photo-electron correction can result in negative phase space
# density. log(-1) = nan
coeff = np.sqrt(2) * (eV2J*self.E0/self.mass)**(3/2) # m^3/s^3
with np.errstate(invalid='ignore', divide='ignore'):
y = np.sqrt(self._U) / (1 - self._U)**(5/2)
lnydy = (np.log(y[np.newaxis, :]
* np.sin(self._theta)[:, np.newaxis]
* dtheta * dphi))
y = np.where(np.isfinite(y), y, 0)
lnydy = np.where(np.isfinite(lnydy), lnydy, 0)
# Terms in that make up the velocity space entropy density
sv1 = s # J/K/m^3 ln(s^3/m^6) -- Already multiplied by -kB
sv2 = kB * (1e6*N) * np.log(1e6*N/coeff) # 1/m^3 * ln(1/m^3)
sv3 = np.trapz(y * lnydy * np.sin(self._theta)[:,np.newaxis] * self._f, self._phi, axis=0)
sv3 = np.trapz(sv3, self._theta, axis=0)
sv3 = -kB * 1e12 * coeff * np.trapz(sv3, self._U, axis=0) # 1/m^3
sv4 = np.trapz(y * np.sin(self._theta)[:,np.newaxis] * self._f, self._phi, axis=0)
sv4 = np.trapz(sv4, self._theta, axis=0)
sv4 = -kB * 1e12 * coeff * self._trapz(sv4, self._U)
# Velocity space entropy density
sv = sv1 + sv2 + sv3 + sv4 # J/K/m^3
return sv
@staticmethod
def _trapz(f, x):
dx = x[1:] - x[0:-1]
with np.errstate(divide='ignore', invalid='ignore'):
F = 0.5 * (f[1:] + f[0:-1]) * (dx * np.log(dx))
F = np.where(np.isfinite(F), F, 0)
return np.sum(F)
def center_timestamps(fpi_data):
'''
FPI time stamps are at the beginning of the sampling interval.
Adjust the timestamp to the center of the interval.
Parameters
----------
fpi_data : `xarray.Dataset`
Dataset containing the time coordinates to be centered
Returns
-------
new_data : `xarray.Dataset`
A new dataset with the time coordinates centered
'''
t_delta = np.timedelta64(int(1e9 * (fpi_data['Epoch_plus_var'].data
+ fpi_data['Epoch_minus_var'].data)
/ 2.0), 'ns')
data = fpi_data.assign_coords({'Epoch': fpi_data['Epoch'] + t_delta})
data['Epoch'].attrs = fpi_data.attrs
data['Epoch_plus_var'] = t_delta
data['Epoch_minus_var'] = t_delta
return data
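# Hedged sketch (not part of the original module): builds a toy dataset with
# the variables center_timestamps expects and shifts each timestamp to the
# middle of its sampling interval. The 4.5 s cadence is an illustrative value
# only, and xarray is assumed to be importable here.
def _demo_center_timestamps():
    import xarray as xr
    epochs = (np.datetime64('2017-07-11T22:34:00', 'ns')
              + np.arange(4) * np.timedelta64(4500, 'ms'))
    ds = xr.Dataset(coords={'Epoch': epochs})
    ds['Epoch_plus_var'] = 4.5    # seconds from timestamp to end of interval
    ds['Epoch_minus_var'] = 0.0   # seconds from timestamp back to its start
    centered = center_timestamps(ds)
    # each Epoch is now shifted forward by (4.5 + 0.0) / 2 = 2.25 s
    return centered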
def check_spacecraft(sc):
'''
Check that a valid spacecraft ID was given.
Parameters
----------
sc : str
Spacecraft identifier
'''
if sc not in ('mms1', 'mms2', 'mms3', 'mms4'):
        raise ValueError('{} is not a recognized SC ID. '
'Must be ("mms1", "mms2", "mms3", "mms4")'
.format(sc))
def check_mode(mode):
'''
Check that a valid data rate mode was given.
Parameters
----------
mode : str
Data rate mode. Can be ('brst', 'srvy', 'fast'). If 'srvy' is
given, it is changed to 'fast'.
Returns
-------
mode : str
A valid data rate mode for FPI
'''
modes = ('brst', 'fast')
if mode == 'srvy':
mode = 'fast'
if mode not in modes:
raise ValueError('Mode "{0}" is not in {1}'.format(mode, modes))
return mode
def check_species(species):
'''
Check that a valid particle species was given.
Parameters
----------
species : str
Particle species: 'e' or 'i'.
    Raises
    ------
    ValueError
        If `species` is not 'e' or 'i'.
'''
if species not in ('e', 'i'):
        raise ValueError('{} is not a recognized species. '
                         'Must be ("i", "e")'.format(species))
def download_ephoto_models():
'''
Download photoelectron model distribution functions.
The file names of the photoelectron models contain the stepper-ids. Which
stepper-id is in use is found externally, in the appropriate dis-moms or
des-moms
'''
# Find the file names
# - Location where they are stored
# - Pattern matching the file names
# - Download the page to serve as a directory listing
url = 'https://lasp.colorado.edu/mms/sdc/public/data/models/fpi/'
fpattern = ('mms_fpi_(brst|fast)_l2_d(i|e)s-bgdist_'
'v[0-9]+.[0-9]+.[0-9]+_p[0-9]+-[0-9]+.cdf')
response = requests.get(url)
# Local repository
local_dir = data_root.joinpath(*url.split('/')[6:9])
if not local_dir.exists():
local_dir.mkdir(parents=True)
local_files = []
# Parse the page and download the files
for match in re.finditer(fpattern, response.text):
# Remote file
remote_fname = match.group(0)
remote_file = '/'.join((url, remote_fname))
# Local file after download
local_fname = local_dir / remote_fname
r = requests.get(remote_file, stream=True, allow_redirects=True)
total_size = int(r.headers.get('content-length'))
initial_pos = 0
# Download
with open(local_fname, 'wb') as f:
with tqdm(total=total_size, unit='B', unit_scale=True,
desc=remote_fname, initial=initial_pos,
ascii=True) as pbar:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar.update(len(chunk))
local_files.append(local_fname)
return local_files
def prep_ephoto(sdc, startdelphi, parity=None):
'''
Prepare the photo electron distributions
'''
# Download the moments file
sdc.optdesc = 'des-moms'
moms_files = sdc.download_files()
moms_files = api.sort_files(moms_files)[0]
cdf = cdfread.CDF(moms_files[0])
    scl = float(cdf.attget('Photoelectron_model_scaling_factor', entry=0)['Data'])
fphe = cdf.attget('Photoelectron_model_filenames', entry=0)['Data']
cdf.close()
# Check to see if the file name and scaling factor change
# If it does, the implementation will have to change to be
# applied on a per-file basis
for file in moms_files[1:]:
cdf = cdfread.CDF(file)
if scl != cdf.attget('Photoelectron_model_scaling_factor'):
raise ValueError('Scale factor changes between files.')
if fphe != cdf.attget('Photoelectron_model_filenames'):
raise ValueError('Photoelectron mode file name changes.')
cdf.close()
# Extract the stepper number
stepper = ePhoto_Downloader.fname_stepper(fphe)
version = ePhoto_Downloader.fname_version(fphe)
# Load the photo-electron model file
ePhoto = ePhoto_Downloader(mode=sdc.mode)
f_photo = ePhoto.load(stepper, version)
# Map the measured startdelphi to the model startdelphi
idx = np.int16(np.floor(startdelphi/16))
if sdc.mode == 'brst':
f_p0_vname = '_'.join(('mms', 'des', 'bgdist', 'p0', sdc.mode))
f_p1_vname = '_'.join(('mms', 'des', 'bgdist', 'p1', sdc.mode))
sdp_vname = '_'.join(('mms', 'des', 'startdelphi', 'counts', sdc.mode))
f0 = f_photo[f_p0_vname][idx,:,:,:]
f1 = f_photo[f_p1_vname][idx,:,:,:]
f0 = f0.rename({sdp_vname: 'Epoch'}).assign_coords({'Epoch': startdelphi['Epoch']})
f1 = f1.rename({sdp_vname: 'Epoch'}).assign_coords({'Epoch': startdelphi['Epoch']})
# Select the proper parity
f_model = f0.where(parity == 0, f1)
else:
f_vname = '_'.join(('mms', 'des', 'bgdist', sdc.mode))
sdp_vname = '_'.join(('mms', 'des', 'startdelphi', 'counts', sdc.mode))
f_model = (f_photo[f_vname][idx,:,:,:]
.rename({sdp_vname: 'Epoch'})
.assign_coords({'Epoch': startdelphi['Epoch']})
)
return scl * f_model
def load_ephoto(dist_data, sc, mode, level, start_date, end_date):
"""
Load FPI photoelectron model.
Parameters
----------
dist_data : `xarray.Dataset`
Distribution function with ancillary data
sc : str
Spacecraft ID: ('mms1', 'mms2', 'mms3', 'mms4')
mode : str
Instrument mode: ('slow', 'fast', 'brst'). If 'srvy' is given, it is
automatically changed to 'fast'.
level: str
Data quality level: ('l1b', 'silt', 'ql', 'l2', 'trig')
start_date, end_date : `datetime.datetime`
Start and end of the data interval.
Returns
-------
f_model : `xarray.Dataset`
Photoelectron model distribution function.
"""
fpi_instr = 'des'
# Variable names
phi_vname = '_'.join((sc, fpi_instr, 'phi', mode))
theta_vname = '_'.join((sc, fpi_instr, 'theta', mode))
energy_vname = '_'.join((sc, fpi_instr, 'energy', mode))
startdelphi_vname = '_'.join((sc, fpi_instr, 'startdelphi', 'count', mode))
parity_vname = '_'.join((sc, fpi_instr, 'steptable', 'parity', mode))
sector_index_vname = '_'.join(('mms', fpi_instr, 'sector', 'index', mode))
pixel_index_vname = '_'.join(('mms', fpi_instr, 'pixel', 'index', mode))
energy_index_vname = '_'.join(('mms', fpi_instr, 'energy', 'index', mode))
# Get the photoelectron model
sdc = api.MrMMS_SDC_API(sc, 'fpi', mode, level, optdesc='des-dist',
start_date=start_date, end_date=end_date)
if mode == 'brst':
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import test_v1
from common import isolated_creds
from vn_test import *
from vm_test import *
from policy_test import *
import fixtures
from future.utils import with_metaclass
sys.path.append(os.path.realpath('tcutils/pkgs/Traffic'))
from traffic.core.stream import Stream
from traffic.core.profile import create, ContinuousProfile, StandardProfile, BurstProfile, ContinuousSportRange
from traffic.core.helpers import Host
from traffic.core.helpers import Sender, Receiver
from tcutils.util import Singleton
from common.base import GenericTestBase
class AnalyticsBaseTest(GenericTestBase):
@classmethod
def setUpClass(cls):
super(AnalyticsBaseTest, cls).setUpClass()
cls.quantum_h= cls.connections.quantum_h
cls.nova_h = cls.connections.nova_h
cls.vnc_lib= cls.connections.vnc_lib
cls.agent_inspect= cls.connections.agent_inspect
cls.cn_inspect= cls.connections.cn_inspect
cls.analytics_obj=cls.connections.analytics_obj
cls.orch = cls.connections.orch
resource_class = cls.__name__ + 'Resource'
cls.res = ResourceFactory.createResource(resource_class)
#end setUpClass
@classmethod
def tearDownClass(cls):
cls.res.cleanUp()
super(AnalyticsBaseTest, cls).tearDownClass()
#end tearDownClass
def remove_from_cleanups(self, fix):
for cleanup in self._cleanups:
if fix.cleanUp in cleanup:
self._cleanups.remove(cleanup)
break
#end remove_from_cleanups
def check_cmd_output(self, cmd_type, cmd_args_list, check_output=False, form_cmd=True, as_sudo=False, print_output=True):
failed_cmds = []
passed_cmds = []
result = True
for cmd_args in cmd_args_list:
cmd = cmd_args
if form_cmd:
cmd = self._form_cmd(cmd_type, cmd_args)
            cmd = cmd + ' | wc -l'
self.logger.info("Running the following cmd:%s \n" %cmd)
if not self.execute_cli_cmd(cmd, check_output, as_sudo=as_sudo, print_output=print_output):
self.logger.error('%s command failed..' % cmd)
failed_cmds.append(cmd)
result = result and False
else:
passed_cmds.append(cmd)
self.logger.info('%s commands passed..\n' % passed_cmds)
self.logger.info('%s commands failed..\n ' % failed_cmds)
return result
# end check_cmd_output
def _form_cmd(self, cmd_type, cmd_args):
cmd = cmd_type
for k, v in cmd_args.items():
if k == 'no_key':
for elem in v:
cmd = cmd + ' --' + elem
else:
cmd = cmd + ' --' + k + ' ' + v
return cmd
# _form_cmd
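    # Hedged usage sketch (not part of the original test module): _form_cmd
    # turns a dict of CLI arguments into a flag string; ordinary keys become
    # "--key value" pairs and the special 'no_key' entry lists bare flags.
    # The command name and argument values below are illustrative only.
    def _demo_form_cmd(self):
        cmd_args = {'last': '10m', 'no_key': ['verbose']}
        # with insertion-ordered dicts this yields
        # 'contrail-logs --last 10m --verbose'
        return self._form_cmd('contrail-logs', cmd_args)
    # end _demo_form_cmd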
def execute_cli_cmd(self, cmd, check_output=False, as_sudo=False, print_output=True):
result = True
analytics = self.res.inputs.collector_ips[0]
output = self.res.inputs.run_cmd_on_server(analytics, cmd,
container='analytics-api', as_sudo=as_sudo)
if print_output:
self.logger.info("Output: %s \n" % output)
if output.failed:
self.logger.error('%s command failed..' % cmd)
result = result and False
if check_output:
output_str = str(output)
if not output_str:
self.logger.error("Output is empty")
result = result and False
return result
# end execute_cli_cmd
def setup_flow_export_rate(self, value):
''' Set flow export rate and handle the cleanup
'''
vnc_lib_fixture = self.connections.vnc_lib_fixture
current_rate = vnc_lib_fixture.get_flow_export_rate()
vnc_lib_fixture.set_flow_export_rate(value)
self.addCleanup(vnc_lib_fixture.set_flow_export_rate, current_rate)
# end setup_flow_export_rate
def verify_vna_stats(self,stat_type=None):
result = True
for vn in [self.res.vn1_fixture.vn_fq_name,\
self.res.vn2_fixture.vn_fq_name]:
if stat_type == 'bandwidth_usage':
#Bandwidth usage
if not (int(self.analytics_obj.get_bandwidth_usage\
(self.inputs.collector_ips[0], vn, direction = 'out')) > 0):
self.logger.error("Bandwidth not shown \
in %s vn uve"%(vn))
result = result and False
if not (int(self.analytics_obj.get_bandwidth_usage\
(self.inputs.collector_ips[0], vn, direction = 'in')) > 0):
self.logger.error("Bandwidth not shown \
in %s vn uve"%(vn))
result = result and False
else:
#ACL count
if not (int(self.analytics_obj.get_acl\
(self.inputs.collector_ips[0],vn)) > 0):
self.logger.error("Acl counts not received from Agent uve \
in %s vn uve"%(vn))
result = result and False
if not (int(self.analytics_obj.get_acl\
(self.inputs.collector_ips[0], vn, tier = 'Config')) > 0):
self.logger.error("Acl counts not received from Config uve \
in %s vn uve"%(vn))
result = result and False
#Flow count
if not (int(self.analytics_obj.get_flow\
(self.inputs.collector_ips[0], vn, direction = 'egress')) > 0):
self.logger.error("egress flow not shown \
in %s vn uve"%(vn))
result = result and False
if not (int(self.analytics_obj.get_flow\
(self.inputs.collector_ips[0], vn, direction = 'ingress')) > 0):
self.logger.error("ingress flow not shown \
in %s vn uve"%(vn))
result = result and False
#VN stats
vns = [self.res.vn1_fixture.vn_fq_name,\
self.res.vn2_fixture.vn_fq_name]
vns.remove(vn)
other_vn = vns[0]
if not (self.analytics_obj.get_vn_stats\
(self.inputs.collector_ips[0], vn, other_vn)):
self.logger.error("vn_stats not shown \
in %s vn uve"%(vn))
result = result and False
return result
#end verify_vna_stats
def setup_and_create_streams(self, src_vm, dst_vm, sport=8000, dport=9000, count=100):
traffic_objs = list()
for i in range(3):
sport = sport
dport = dport + i
traffic_objs.append(self.start_traffic(src_vm, dst_vm, 'udp',
sport, dport, fip_ip=dst_vm.vm_ip, count=100))
time.sleep(10)
for traffic_obj in traffic_objs:
self.stop_traffic(traffic_obj)
#end setup_create_streams
def creat_bind_policy(self,policy_name, rules, src_vn_fix, dst_vn_fix):
#method to avoid redundant code for binding
policy_fixture = self.useFixture(
PolicyFixture(
policy_name=policy_name,
rules_list=rules,
inputs=self.inputs,
connections=self.connections))
src_vn_fix.bind_policies([policy_fixture.policy_fq_name], src_vn_fix.vn_id)
self.addCleanup(src_vn_fix.unbind_policies, src_vn_fix.vn_id, [
policy_fixture.policy_fq_name])
dst_vn_fix.bind_policies([policy_fixture.policy_fq_name], dst_vn_fix.vn_id)
self.addCleanup(dst_vn_fix.unbind_policies, dst_vn_fix.vn_id, [
policy_fixture.policy_fq_name])
#end create and bind policy
def verify_session_record_table(self, start_time, src_vn, dst_vn):
self.logger.info('Verify session record table')
result = True
for ip in self.inputs.collector_ips:
self.logger.info(
"Verifying SessionRecordTable through opserver %s" %
(ip))
#query and verify number of client session records
query = 'vn=' + src_vn + ' AND remote_vn=' + dst_vn + ' AND protocol=17'
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionRecordTable',
start_time=start_time,
end_time='now',
select_fields=['forward_flow_uuid',
'reverse_flow_uuid', 'vn', 'remote_vn'],
where_clause=query,
session_type='client')
if len(res) != 3:
self.logger.error('Expected client session records 3 got %s'%len(res))
result = result and False
self.logger.debug(res)
#query and verify number of server session records
query = 'vn=' + dst_vn + ' AND remote_vn=' + src_vn + ' AND protocol=17'
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionRecordTable',
start_time=start_time,
end_time='now',
select_fields=['forward_flow_uuid',
'reverse_flow_uuid', 'vn', 'remote_vn'],
where_clause=query,
session_type='server')
if len(res) != 3:
self.logger.error('Expected server session records 3 got %s'%len(res))
result = result and False
self.logger.debug(res)
#query with local_ip server_port protocol
query = 'local_ip=%s AND server_port=9001 AND protocol=17'%self.res.vn1_vm1_fixture.vm_ip
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionRecordTable',
start_time=start_time,
end_time='now',
select_fields=['vn', 'remote_vn'],
where_clause=query,
session_type="client")
if len(res) != 1:
self.logger.error('Expected session records 1 got %s'%len(res))
result = result and False
self.logger.debug(res)
#query with server_port local_ip filter by server_port
query = 'vn=' + src_vn + ' AND remote_vn=' + dst_vn + ' AND protocol=17'
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionRecordTable',
start_time=start_time,
end_time='now',
select_fields=['forward_flow_uuid',
'reverse_flow_uuid', 'local_ip', 'server_port'],
where_clause=query,
filter='server_port=9001',
session_type="client")
if len(res) != 1 :
self.logger.error('Expected session records 1 got %s'%len(res))
result = result and False
self.logger.debug(res)
#query with client_port remote_ip filter by client_port
#Total we get three record limit by 2
query = 'vn=' + src_vn + ' AND remote_vn=' + dst_vn + ' AND protocol=17'
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionRecordTable',
start_time=start_time,
end_time='now',
select_fields=['forward_flow_uuid',
'reverse_flow_uuid', 'remote_ip', 'client_port'],
where_clause=query,
filter='client_port=8000',
limit=2,
session_type="client")
if len(res) != 2:
self.logger.error('Expected session records 2 got %s'%len(res))
result = result and False
self.logger.debug(res)
#query with sort_fields
query = 'vn=' + src_vn + ' AND remote_vn=' + dst_vn + ' AND protocol=17'
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionRecordTable',
start_time=start_time,
end_time='now',
select_fields=['forward_flow_uuid',
'reverse_flow_uuid', 'local_ip', 'server_port'],
where_clause=query,
sort_fields=['server_port'], sort=1,
session_type="client")
if res and res[0]['server_port'] != 9000:
self.logger.error('Expected server port 9000 got %s'%res[0]['server_port'])
result = result and False
self.logger.debug(res)
assert result,'Failed to get expected number of Records'
#end verify_session_record_table
def verify_session_series_table(self, start_time, src_vn, dst_vn):
self.logger.info('Verify session series table and aggregation stats')
result = True
query = 'vn=' + src_vn + ' AND remote_vn=' + dst_vn + ' AND protocol=17'
granularity =10
vm_node_ip = self.res.vn1_vm1_fixture.vm_node_data_ip
svc_name = self.inputs.host_data[vm_node_ip]['service_name']
if not svc_name:
vm_host = self.inputs.host_data[vm_node_ip]['host_ip']
else:
vm_host = self.inputs.host_data[vm_node_ip]['service_name'][vm_node_ip]
ip = self.inputs.collector_ips[0]
self.logger.info("Verifying SessionSeriesTable through opserver %s" %(ip))
#query client session samples
self.logger.info('SessionSeries: [SUM(forward_sampled_bytes), SUM(reverse_sampled_pkts), sample_count]')
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['SUM(forward_sampled_pkts)', 'SUM(reverse_sampled_bytes)', 'sample_count', 'vrouter'],
where_clause=query,
filter='vrouter=%s'% vm_host, session_type="client")
if len(res) != 1 and res[0]['SUM(forward_sampled_pkts)'] != 300:
self.logger.error('Session aggregate stats returned %s not expected'%len(res))
result = result and False
self.logger.debug(res)
#have three server ports so three record in output each with sum(forward pkts) 100
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['server_port','local_ip','SUM(forward_sampled_pkts)', 'SUM(reverse_sampled_bytes)', 'sample_count', 'vrouter_ip'],
where_clause=query,
session_type="client")
status = True
for rec in res:
if rec['SUM(forward_sampled_pkts)'] != 100:
                status = False
if len(res) != 3 and not status:
self.logger.error('Session series records returned %s not expected'%len(res))
result = result and status
self.logger.debug(res)
## all session msgs have same vn-remote_vn hence following query should return 1 record
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['vn','remote_vn','SUM(forward_sampled_pkts)', 'SUM(reverse_sampled_bytes)', 'sample_count'],
where_clause=query,
session_type="client")
if len(res) != 1 and res[0].get('SUM(forward_sampled_pkts)') !=300 :
self.logger.error('Session series records returned %s not expected'%len(res))
result = result and False
self.logger.debug(res)
## sort results by server_port column
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['vmi','local_ip','server_port','SUM(forward_sampled_bytes)', 'SUM(reverse_sampled_pkts)', 'sample_count'],
where_clause=query,
sort_fields=['server_port'], sort=1, limit=3,
session_type="client")
if len(res) !=3 and res[0]['server_port'] != 9000:
self.logger.error('Session series records with sort fileld returned %s not expected'%len(res))
result = result and False
self.logger.debug(res)
#verify granularity with T=10
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['T=%s' % (granularity), 'SUM(forward_sampled_bytes)',
'SUM(reverse_sampled_pkts)', 'vrouter'],
where_clause=query,
session_type="client")
if not len(res) :
self.logger.error('Session series records with granularity returned %s not expected'%len(res))
result = result and False
self.logger.debug(res)
#with sampled bytes
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['T', 'forward_sampled_bytes', 'reverse_sampled_pkts'],
where_clause=query + ' AND server_port=9001',
session_type="client")
if not len(res) :
self.logger.error('Session series records with specific server_port returned %s not expected'%len(res))
result = result and False
self.logger.debug(res)
#with logged bytes
res = self.analytics_obj.ops_inspect[ip].post_query(
'SessionSeriesTable',
start_time=start_time,
end_time='now',
select_fields=['T', 'forward_logged_pkts', 'reverse_logged_bytes'],
where_clause=query,
session_type="client")
if not len(res) :
self.logger.error('Session series records with logged _bytes/pkts returned %s not expected'%len(res))
            result = result and False
# DucVinh2609/mtb_admin: app/views.py
from flask import render_template, request, url_for, redirect, send_from_directory, jsonify, session
from flask_login import login_user, logout_user, current_user, login_required
from werkzeug.exceptions import HTTPException, NotFound, abort
from flask_restful import Resource, Api
from datetime import date, timedelta, datetime
from time import mktime
from dateutil.parser import parse
from flask_wtf import Form
from wtforms.fields.html5 import DateField
from wtforms import StringField
from werkzeug.utils import secure_filename
import os, logging
from app import app, lm, db, bc
from app.models import User
from app.forms import LoginForm, RegisterForm, AddMovietypesForm, EditMovietypesForm, AddMovieFormatsForm, EditMovieFormatsForm, AddRolesForm, EditRolesForm, AddEmployeesForm, EditEmployeesForm, AddCountriesForm, EditCountriesForm, AddMoviesForm, EditMoviesForm, AddSeattypesForm, EditSeattypesForm, AddRoomformatsForm, EditRoomformatsForm, AddRoomsForm, EditRoomsForm, AddStatusForm, EditStatusForm, EditPassForm
from flaskext.mysql import MySQL
UPLOAD_FOLDER = 'D:/python/heroku/mtb-admin/app/static/assets/img/uploads'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
api = Api(app)
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = ''
app.config['MYSQL_DATABASE_DB'] = 'mtb_db'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
# app.config['MYSQL_DATABASE_USER'] = 'ducvinh26091997'
# app.config['MYSQL_DATABASE_PASSWORD'] = '<PASSWORD>'
# app.config['MYSQL_DATABASE_DB'] = 'mtb_admin'
# app.config['MYSQL_DATABASE_HOST'] = 'db4free.net'
mysql.init_app(app)
tvalue= str(datetime.today().strftime("%Y-%m-%d"))
today = datetime.strptime(tvalue, '%Y-%m-%d').date()
class Database:
def __init__(self):
self.con = mysql.connect()
self.cur = self.con.cursor()
def list_movietypes(self):
self.cur.execute("SELECT id, name from movietypes")
result = self.cur.fetchall()
return result
def list_movieformats(self):
self.cur.execute("SELECT id, name from movieformats")
result = self.cur.fetchall()
return result
def list_roles(self):
self.cur.execute("SELECT id, role_name from roles")
result = self.cur.fetchall()
return result
def list_employees(self):
self.cur.execute("SELECT username, fullname, birthday, address, phone, gender, role_name, avatar from employees INNER JOIN roles ON employees.role_id = roles.id ")
result = self.cur.fetchall()
return result
def list_countries(self):
self.cur.execute("SELECT country_code, country from countries")
result = self.cur.fetchall()
return result
def list_movies(self):
self.cur.execute("SELECT id, name, movieformat_id, movietype_id, duration, country_code, start_date, end_date, image, note, description from movies")
result = self.cur.fetchall()
return result
def list_seattypes(self):
self.cur.execute("SELECT id, seattype_name from seattypes")
result = self.cur.fetchall()
return result
def list_roomformats(self):
self.cur.execute("SELECT id, name from roomformats")
result = self.cur.fetchall()
return result
def list_rooms(self):
self.cur.execute("SELECT id, room_name, roomformat_id, status, max_row_seat, max_seat_row, note from rooms")
result = self.cur.fetchall()
return result
def list_status(self):
self.cur.execute("SELECT id, seat_condition from status")
result = self.cur.fetchall()
return result
def list_showings(self):
self.cur.execute("SELECT room_id, time, showtime, showings.id, movie_id, movies.name, duration from showings INNER JOIN movies ON showings.movie_id = movies.id ORDER BY time ASC")
result = self.cur.fetchall()
return result
def count_member(self):
self.cur.execute("SELECT COUNT(username) from members")
result = self.cur.fetchone()
return result
def total_sale(self):
self.cur.execute("SELECT price, create_at from tickets")
result = self.cur.fetchall()
return result
def seat_on_room(self):
self.cur.execute("SELECT max_row_seat, max_seat_row, id from rooms")
result = self.cur.fetchall()
return result
def count_showing(self):
self.cur.execute("SELECT room_id FROM showings WHERE showtime=%s",today)
result = self.cur.fetchall()
return result
def seat_booked_day(self):
self.cur.execute("SELECT price, unitprice FROM tickets d INNER JOIN showings v ON v.id = d.showing_id WHERE v.showtime=%s",today)
result = self.cur.fetchall()
return result
def ticket_purchase(self):
self.cur.execute("SELECT username FROM tickets")
result = self.cur.fetchall()
return result
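# Hedged usage sketch (not part of the original views module): the Database
# helper opens a MySQL connection when constructed and each list_* method
# returns raw cursor rows. The example rows in the comment are invented.
def _demo_list_movietypes():
    db = Database()
    rows = db.list_movietypes()   # e.g. ((1, 'Action'), (2, 'Comedy'))
    return [{'id': r[0], 'name': r[1]} for r in rows]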
@app.route('/sitemap.xml')
def sitemap():
return send_from_directory(os.path.join(app.root_path, 'static'), 'sitemap.xml')
@app.route('/googlee35aa2f2fd7b0c5b.html')
def google():
return send_from_directory(os.path.join(app.root_path, 'static'), 'googlee35aa2f2fd7b0c5b.html')
@app.route('/print')
def printMsg():
app.logger.warning('testing warning log')
app.logger.error('testing error log')
app.logger.info('testing info log')
return "Check your console"
# provide login manager with load_user callback
@lm.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
# authenticate user
@app.route('/logout')
def logout():
session.pop('loggedin', None)
session.pop('username', None)
session.pop('avatar', None)
# Redirect to login page
return redirect(url_for('login'))
# register user
@app.route('/register.html', methods=['GET', 'POST'])
def register():
# declare the Registration Form
form = RegisterForm(request.form)
msg = None
if request.method == 'GET':
return render_template('layouts/auth-default.html',
content=render_template( 'pages/register.html', form=form, msg=msg ) )
# check if both http method is POST and form is valid on submit
if form.validate_on_submit():
# assign form data to variables
username = request.form.get('username', '', type=str)
password = request.form.get('password', '', type=str)
email = request.form.get('email' , '', type=str)
# filter User out of database through username
user = User.query.filter_by(user=username).first()
# filter User out of database through username
user_by_email = User.query.filter_by(email=email).first()
if user or user_by_email:
msg = 'Error: User exists!'
else:
pw_hash = password #<PASSWORD>_password_hash(password)
user = User(username, email, pw_hash)
user.save()
msg = 'User created, please <a href="' + url_for('login') + '">login</a>'
else:
msg = 'Input error'
return render_template('layouts/auth-default.html',
content=render_template( 'pages/register.html', form=form, msg=msg ) )
# authenticate user
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
msg = None
if form.validate_on_submit():
username = request.form.get('username', '', type=str)
password = request.form.get('password', '', type=str)
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT * from employees where username='" + username + "' and password='" + password + "'")
account = cursor.fetchone()
if account:
session['loggedin'] = True
session['username'] = account[0]
session['avatar'] = account[8]
return redirect('/home')
else:
msg = "Username or Password is wrong"
return render_template('layouts/auth-default.html', content=render_template( 'pages/login.html', form=form, msg=msg ) )
# Profile
@app.route('/profile')
def profile():
# Check if user is loggedin
if 'loggedin' in session:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute('SELECT * FROM employees WHERE username = %s', [session['username']])
account = cursor.fetchone()
return render_template('layouts/default.html', content=render_template( 'pages/profile.html', account=account, username=session['username'], avatar=session['avatar'] ) )
return redirect(url_for('login'))
@app.route('/icons.html')
def icons():
return render_template('layouts/default.html',
content=render_template( 'pages/icons.html') )
# Settings
@app.route('/settings')
def settings():
try:
form = EditEmployeesForm(request.form)
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT username, fullname, birthday, address, phone, gender, role_id, avatar, about from employees WHERE username=%s", session['username'])
row = cursor.fetchone()
if row:
return render_template('layouts/default.html', content=render_template( 'pages/settings.html', form=form, row=row, username=session['username'], avatar=session['avatar']))
else:
            return 'Error loading {username}'.format(username=session['username'])
except Exception as e:
print(e)
finally:
cursor.close()
conn.close()
# Change password
@app.route('/change-password')
def password():
form = EditPassForm(request.form)
msg = None
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute("SELECT password FROM employees WHERE username=%s", session['username'])
row = cursor.fetchone()
return render_template('layouts/default.html', content=render_template( 'pages/change-password.html', row=row, form=form, msg=msg, username=session['username'], avatar=session['avatar']))
@app.route('/password/update/<string:new_password>')
def update_password(new_password):
try:
sql = "UPDATE employees SET password=%s WHERE username=%s"
data = (new_password, session['username'],)
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(sql, data)
conn.commit()
return redirect('/profile')
except Exception as e:
print(e)
finally:
cursor.close()
conn.close()
# Render the tables page
@app.route('/tables.html')
def tables():
return render_template('layouts/default.html',
content=render_template( 'pages/tables.html') )
# App main route + generic routing
@app.route('/home')
def home():
def db_query():
db = Database()
emps = db.count_member()
return emps
countmember = db_query()
def db_query2():
db = Database()
emps = db.seat_on_room()
return emps
total_seat_room = db_query2()
def db_query3():
db = Database()
emps = db.count_showing()
return emps
showing = db_query3()
def db_query4():
db = Database()
emps = db.seat_booked_day()
return emps
seat_booked = db_query4()
def db_query5():
db = Database()
emps = db.ticket_purchase()
return emps
ticket_purchase_form = db_query5()
def db_query1():
db = Database()
emps = db.total_sale()
return emps
sale = db_query1()
by_member = 0
no_by_member = 0
for row in ticket_purchase_form:
if row[0]=="no":
no_by_member += 1
else:
by_member += 1
    try:
        ratio = round((by_member/(no_by_member + by_member))*100,2)
    except ZeroDivisionError:
        ratio = 0
    unratio = round(100-ratio,2)
total = 0
sale_may = 0
sale_jun = 0
sale_jul = 0
sale_aug = 0
sale_sep = 0
sale_oct = 0
sale_nov = 0
sale_dec = 0
for row in sale:
total += int(row[0])
for row in sale:
if row[1].month==5:
sale_may += row[0]
elif row[1].month==6:
sale_jun += row[0]
elif row[1].month==7:
sale_jul += row[0]
elif row[1].month==8:
sale_aug += row[0]
elif row[1].month==9:
sale_sep += row[0]
elif row[1].month==10:
sale_oct += row[0]
elif row[1].month==11:
sale_nov += row[0]
elif row[1].month==12:
sale_dec += row[0]
total_seat_day = 0
for row in showing:
for row1 in total_seat_room:
if row[0]==row1[2]:
total_seat_day += int(row1[0])*int(row1[1])
total_seat_booked_day = 0
for row in seat_booked:
total_seat_booked_day += int(row[0])/int(row[1])
try:
performance = round((total_seat_booked_day/total_seat_day)*100,2)
except ZeroDivisionError:
performance = 0
# Check if user is loggedin
if 'loggedin' in session:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute('SELECT * FROM employees WHERE username = %s', [session['username']])
account = cursor.fetchone()
cursor1 = conn.cursor()
cursor1.execute('SELECT * FROM members ORDER BY create_at DESC LIMIT 5')
member = cursor1.fetchall()
cursor2 = conn.cursor()
cursor2.execute('SELECT COUNT(id) FROM tickets WHERE MONTH(create_at)=7')
jul = cursor2.fetchone()
cursor3 = conn.cursor()
cursor3.execute('SELECT COUNT(id) FROM tickets WHERE MONTH(create_at)=8')
aug = cursor3.fetchone()
cursor4 = conn.cursor()
cursor4.execute('SELECT COUNT(id) FROM tickets WHERE MONTH(create_at)=9')
sep = cursor4.fetchone()
cursor5 = conn.cursor()
cursor5.execute('SELECT COUNT(id) FROM tickets WHERE MONTH(create_at)=10')
oct = cursor5.fetchone()
cursor6 = conn.cursor()
cursor6.execute('SELECT COUNT(id) FROM tickets WHERE MONTH(create_at)=11')
nov = cursor6.fetchone()
cursor7 = conn.cursor()
cursor7.execute('SELECT COUNT(id) FROM tickets WHERE MONTH(create_at)=12')
dec = cursor7.fetchone()
return render_template('layouts/default.html', content=render_template( 'pages/index.html', account=account, jul=jul, aug=aug, sep=sep, oct=oct, nov=nov, dec=dec, sale_may=sale_may, sale_jun=sale_jun, sale_jul=sale_jul, sale_aug=sale_aug, sale_sep=sale_sep, sale_oct=sale_oct, sale_nov=sale_nov, sale_dec=sale_dec, member=member, username=session['username'], avatar=session['avatar'], countmember=countmember, total=total, performance=performance, by_member=by_member, no_by_member=no_by_member, ratio=ratio, unratio=unratio))
# User is not loggedin redirect to login page
return redirect(url_for('login'))
@app.route('/<path>')
def index(path):
content = None
try:
if 'loggedin' in session:
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute('SELECT * FROM employees WHERE username = %s', [session['username']])
account = cursor.fetchone()
return render_template('layouts/default.html', account=account,
content=render_template( 'pages/'+path) )
# User is not loggedin redirect to login page
return redirect(url_for('login'))
except:
return render_template('layouts/auth-default.html',
content=render_template( 'pages/404.html' ) )
# Movie Types
@app.route('/movietypes')
def movietypes():
def db_query():
db | |
# Repository: mitdo/o2ac-ur
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2021, Chukyo University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Chukyo University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>
import cv2
import numpy as np
import math
import copy
import os
import json
import math
import random
"""
Downsampling for binary image
Input:
im: input image
fx, fy: scaling factor along with x and y coordinate
Return:
im_ds: downsampled image
"""
def downsampling_binary(im, _fx=0.5, _fy=0.5):
im_ds = cv2.resize(im, None, fx=_fx, fy=_fy, interpolation=cv2.INTER_AREA)
im_ds = np.clip(im_ds, 0, 1)
return im_ds.copy()
"""
In-plane Rotation Estimation using Distribution of Line Segment Directions
"""
class RotationEstimation:
def __init__(self, im, bbox, lseg_len=15, n_bin=90):
# Parameter for LSDH
self._lseg_len = lseg_len # length of line segment
self._n_bin = n_bin # number of histogram's bin
self._step = int(self._lseg_len / 2) # sampling step of line segment
# Parameter for image processing
self._canny1 = 100 # parameter1 for canny edge detection
self._canny2 = 200 # parameter2 for canny edge detection
self._continuous_streaming_mode = cv2.RETR_EXTERNAL # contour extraction mode
# Variables
self._deg2rad = math.pi / 180.0
self._im = im # input image
self._bbox = bbox # bounding box. list[x,y,w,h]
self._im_bb = self.im_crop_bbox(self._im, self._bbox) # image of bounding box
self._im_edge = None
self._contours = None # contours
# Do main processing
self._iprs = self.main_proc() # in-plane rotations (in degree)
"""
Crop image
"""
def im_crop_bbox(self, im, bbox):
im_bb = im[bbox[1] : bbox[1] + bbox[3], bbox[0] : bbox[0] + bbox[2]]
im_out = im_bb.copy()
return im_out
"""
Contour detection from image
"""
def contour_detection(self):
im_bb = self.im_crop_bbox(self._im, self._bbox)
# Histogram normalization
# create a CLAHE object (Arguments are optional).
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(self._im_bb)
# Edge detection
edges = cv2.Canny(cl1, self._canny1, self._canny2)
# cv2.imshow("cl1",cl1)
# cv2.waitKey(0)
# cv2.imshow("edges",edges)
# cv2.waitKey(0)
self._im_edge = edges
# findContours
# check version of cv2
        cv2_version = cv2.__version__
        if "4" == cv2_version[0]:
contours, hierarchy = cv2.findContours(
edges, self._continuous_streaming_mode, cv2.CHAIN_APPROX_NONE
)
else:
_, contours, hierarchy = cv2.findContours(
edges, self._continuous_streaming_mode, cv2.CHAIN_APPROX_NONE
)
return contours
"""
Make a Line Segment Direction Histogram from a list of contour pixels
"""
def make_LSDH(self, cont):
i = 0
        hist = np.zeros(self._n_bin, float)
while i < len(cont) - self._lseg_len:
# get line segment
lseg = cont[i + self._lseg_len][0] - cont[i][0]
# compute direction of line segment
if lseg[0] != 0:
dir = 180.0 * math.atan(float(lseg[1]) / float(lseg[0])) / math.pi
else:
dir = 90.0
if dir < 0.0:
dir += 180.0
bin = int(dir / (180.0 / self._n_bin))
# Vote to the histogram
hist[bin] += 1.0
if bin == self._n_bin - 1:
hist[0] += 0.5
hist[bin - 1] += 0.5
elif bin == 0:
hist[self._n_bin - 1] += 0.5
hist[1] += 0.5
else:
hist[bin - 1] += 0.5
hist[bin + 1] += 0.5
i += self._step
return hist
"""
Compute dominant orientation from histogram
"""
def compute_orientation(self, hist):
# Normalize histogram
sum = np.sum(hist)
hist = hist / (sum + 0.00001)
# detect peak
vote_max_bin = np.argmax(hist)
# Convert the bin index to the orientation in degree
in_plane_rotation = vote_max_bin * 180.0 / self._n_bin
return [in_plane_rotation]
"""
Main processing
"""
def main_proc(self):
# detect contours from the bounding box image.
self._contours = self.contour_detection()
# make a Line Segment Direction Histogram (LSDH)
        lsdh = np.zeros(self._n_bin, float)
for cont in self._contours:
hist = self.make_LSDH(cont)
lsdh += hist
im_res = self.get_im_bb()
im_res = cv2.cvtColor(im_res, cv2.COLOR_GRAY2BGR)
# compute domitant orientation of LSDH
orientation = self.compute_orientation(lsdh)
return orientation
def get_im_bb(self):
return self._im_bb.copy()
def get_im_edge(self):
return self._im_edge.copy()
def get_im_contours(self):
im_bb3 = cv2.cvtColor(self._im_bb.copy(), cv2.COLOR_GRAY2BGR)
im_cont = cv2.drawContours(im_bb3, self._contours, -1, (0, 255, 0), 1)
return im_cont
def get_result_image(self):
im_res = self.get_im_bb()
im_res = cv2.cvtColor(im_res, cv2.COLOR_GRAY2BGR)
pt1 = (int(self._bbox[2] / 2), int(self._bbox[3] / 2))
cv2.circle(im_res, pt1, 3, (0, 255, 0), -1, cv2.LINE_AA)
line_length = int(min(self._bbox[2:]) / 2)
for rot in self._iprs:
pt2 = (
int(pt1[0] + line_length * math.cos(rot * self._deg2rad)),
int(pt1[1] + line_length * math.sin(rot * self._deg2rad)),
)
cv2.arrowedLine(im_res, pt1, pt2, (0, 255, 0), 1, cv2.LINE_AA)
return im_res
"""
Get orientation
Input:
        flip: if true, detected orientations and flipped orientations are returned.
    Return:
list of orientations
"""
def get_orientation(self, flip=True):
orientations = copy.deepcopy(self._iprs)
if flip is True:
for ori in orientations:
self._iprs.append(ori + 180.0)
return self._iprs
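# A minimal usage sketch for RotationEstimation (illustrative only; the image path and
# the bounding box values are placeholders, not data used elsewhere in this module):
#   im = cv2.imread("part_scene.png", cv2.IMREAD_GRAYSCALE)
#   bbox = [50, 40, 120, 90]  # [x, y, w, h] around the detected part
#   rot_est = RotationEstimation(im, bbox, lseg_len=15, n_bin=90)
#   candidate_angles = rot_est.get_orientation(flip=True)  # in-plane rotations in degrees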
"""
Binary Template Matching
Input:
temp: template, 1ch numpy array
scene: input scene, 1 ch numpy array
region: search region (+-search_y, +-search_x)
"""
class BinaryTemplateMatching:
def __init__(self, temp, scene, region=(0, 0)):
self._temp = temp.copy() # template
self._scene = scene.copy() # scene
self._region = region # search region
        self._initial_pad = np.array([0, 0], int)
self.initial_padding()
self._offset_ini_pad = np.array(
            (self._initial_pad[0], self._initial_pad[0]), int
)
self._t_size = np.asarray(self._temp.shape) # size of template
self._s_size = np.asarray(self._scene.shape) # size of scene
# left-top pixel of an initial search position
        self._ltop_c = ((self._s_size - self._t_size) / 2).astype(int)
# Matching, compute score map, _s_map, and offset list
self._s_map, self._offset_list = self.main_proc()
"""
If the size of scene is smaller than that of template, zero padding is applied.
"""
def initial_padding(self):
s_shape = self._scene.shape
t_shape = self._temp.shape
diff_size_y = t_shape[0] - s_shape[0]
diff_size_x = t_shape[1] - s_shape[1]
diff_max = max(diff_size_y, diff_size_x)
if diff_max > 0:
pad_size = int(diff_max / 2) + 1
self._scene = np.pad(self._scene, (pad_size, pad_size), "minimum")
            self._initial_pad = np.array((pad_size, pad_size), int)
def padding(self, im, param):
pad_size = max(param) # define padding size
im_pad = np.pad(im, (pad_size, pad_size), "edge")
return im_pad, pad_size
def main_proc(self):
# padding
pad_scene, pad_size = self.padding(self._scene, self._region)
pad_scene = pad_scene > 0
pad_scene = np.logical_not(pad_scene)
pad_scene = np.asarray(pad_scene, np.uint8)
pad_scene = cv2.distanceTransform(pad_scene, cv2.DIST_L2, 5)
# score map
s_map = np.zeros([self._region[0] * 2 + 1, self._region[1] * 2 + 1])
offset_list = list() # offset_list[n]+t_size/2 = center pixel
for sj, j in enumerate(range(-self._region[0], self._region[0] + 1)):
for si, i in enumerate(range(-self._region[1], self._region[1] + 1)):
offset = self._ltop_c + np.array([j, i]) + pad_size
s_map[sj, si] = np.sum(
self._temp
* pad_scene[
offset[0] : offset[0] + self._t_size[0],
offset[1] : offset[1] + self._t_size[1],
]
)
offset_list.append(offset - pad_size)
return s_map, offset_list
# return the location in scene coordinate system and its score
def get_result(self):
res = np.argmin(self._s_map)
res_offset = self._offset_list[res] - self._initial_pad
center_pixel = res_offset + self._t_size / 2
return center_pixel, res_offset, np.min(self._s_map)
def get_result_image(self, _offset):
offset = _offset.copy()
pad_scene, pad_size = self.padding(self._scene, self._region)
im_res = np.zeros(pad_scene.shape)
offset += pad_size + self._initial_pad
im_res[
offset[0] : offset[0] + self._t_size[0],
offset[1] : offset[1] + self._t_size[1],
] = self._temp
im_res += pad_scene
return im_res
def get_score_map(self):
return self._s_map
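# A minimal usage sketch for BinaryTemplateMatching (illustrative only; the binary masks
# and the +-10 pixel search region are assumptions, not values taken from this module):
#   temp = downsampling_binary(template_mask)    # 1-ch binary template
#   scene = downsampling_binary(scene_mask)      # 1-ch binary scene crop
#   btm = BinaryTemplateMatching(temp, scene, region=(10, 10))
#   center_px, offset, score = btm.get_result()  # best match position and its (lower-is-better) score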
class TemplateMatching:
def __init__(self, im_c, ds_rate, temp_root, temp_info_name="template_info.json"):
"""
im_c: an image of input scene
ds_rate: downsampling rate
temp_root: path to templates
        temp_info_name: file name of the template information JSON
"""
# downsampling rate
self.ds_rate = ds_rate
# input image
self.im_c = im_c
#
self.temp_root = temp_root
""" Load template infomation """
temp_info_fullpath = os.path.join(temp_root, temp_info_name)
if | |
import pymongo
import os
import base64
import datetime
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
import threading
import getpass
import json
import logging
import ast
import time
from urllib import request, parse
import requests
import operator
from queue import Queue
from dhm_security_module import *
# logger setup
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
handlers=[
logging.FileHandler("{0}/{1}.log".format(os.getcwd()+"/logs/", "logger")),
logging.StreamHandler()
])
logger = logging.getLogger()
HOST = "localhost"
PORT = 7777
db_client_name = "IOT_DHM_DB"
col_a3c_gw = "a3c_gw_info"
col_gw = "gw_info"
col_dh = "dh_info"
db_client = None
GW_ADDR = "http://192.168.1.79" # Hardcoded gateway address (assuming discovery already occurred inside the local area network)
GW_PORT = "1111" # hardcoded gateway port
# dictionary containing the connection session properties with the clients
# A session includes
# > session key
# > ...
connections = {}
# list of devices managed by this dhm server
dh_list = []
# list of gateways to communicate
gw_list = []
# global variables
server_id = None
server_pub_key_pem = None
server_pub_key = None
server_priv_key = None
# thread responsible for revoking the client session
# every X seconds the thread will check for clients
class ConnectionManager(threading.Thread):
def run(self):
# < TODO >
print("oi")
# Class to be used if the registration process is initiated
# by the AAA server itself. The server polls for online
# gateways and sends its information and location to
# the gateway. The registration is performed by also
# sending its public key along with its signature.
# < TODO >
'''
class RegistrationManager(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
return
'''
class MainHandler(RequestHandler):
def get(self):
self.write("IoT DHM Server")
# Class to be used if the registration process is initiated by
# the gateways themselves. They actively wait and search for the
# AAA server to be operational. When it is, they then announce their
# identity to the server, which must be verified and valid.
class RegistrationHandler(RequestHandler):
def post(self):
return
class AuthenticationHandler(RequestHandler):
def post(self):
return
class DHConfirmHandler(RequestHandler):
def post(self):
dh_uuid = self.get_body_argument('dh_uuid')
status = self.get_body_argument('status')
print(status)
if(status == "OK"):
logger.info("DH " + dh_uuid +" successfully configured")
def get(self):
print("GET")
def make_app():
urls = [
("/", MainHandler),
("/authenticationRequest/", AuthenticationHandler),
("/registrationRequest/", RegistrationHandler),
("/dhSessionConfirm/", DHConfirmHandler)
]
return Application(urls, debug=True)
def authTicketGen(device, derived_key, gw_id):
key = os.urandom(16) # 128 bit key for GW <-> DH session
#secret = encryptRSA(loadKey(device['pub_key'].encode()), key)
secret = encryptRSAPKCS1v15(loadKey(device['pub_key'].encode()), key)
#print("Session key (K) for GW <-> DH session establishment")
#for b in key:
# print(int(b))
public = {'dh_id' : device['uuid'], 'dh_addr' : device['ble_addr'], 'owner_id' : gw_id, 'access_rights' : 'Rx', 'ticket_lifetime': 3600}
m = secret + str(public).encode()
signature = signRsaPKCS1v15(server_priv_key, m)
ticket = {
'secret': secret, #base64.b64encode(secret),
'public' : base64.b64encode(str(public).encode()),
'signature': signature #base64.b64encode(signature),
}
return ticket, key
# thread responsible for managing the connections and sessions between the DHM and each GW
class DHM_GW_Session(threading.Thread):
def __init__(self, queue, gw_addr, gw_id, a3c_pub_key, session_key, ticket, sleep_delta):
threading.Thread.__init__(self)
self.queue = queue
self.gw_addr = gw_addr # url address of the GW
self.gw_id = gw_id # uuid of the gw
self.gw_a3c_pub_key = a3c_pub_key
self.session_key = session_key # session key computed from the handshake with the A3C_GW
self.ticket = ticket # the ticket that authenticates the DHM as valid for session establishment
self.sleep_delta = sleep_delta # time interval between retrying to communicate with the GW in case it failed before
#self.delta = base64.b64decode(ticket['public'])['expiration_date'] # duration of the session in seconds
self.r1 = os.urandom(16) # nonce to send to gw
self.derived_key = None
def run(self):
while True:
#print("# Starting DHM <-> GW session...")
logger.info("Starting DHM <-> GW session. GW UUID " + self.gw_id)
req_body = {
'ticket' : self.ticket,
'public_key' : base64.b64encode(serializeKey(self.gw_a3c_pub_key)),
'nonce' : base64.b64encode(self.r1)
}
data = parse.urlencode(req_body).encode()
#req = request.Request(self.gw_addr+"/dhmSessionSetup/", data)
req = request.Request(GW_ADDR+":"+GW_PORT+"/dhmSessionSetup/", data)
response = ast.literal_eval(request.urlopen(req).read().decode())
#print(response)
r2 = base64.b64decode(response['nonce'])
# compute derived key K'
self.derived_key = digestMD5(self.session_key, [self.r1, r2])
#print("Derived session key (K') with GW")
#for b in self.derived_key:
# print(int(b))
# validate the sent nonce r1 by decrypting it
recv_r1 = decryptAES(response['enc_nonce'][0],self.derived_key,response['enc_nonce'][1])
# encrypt the received nonce r2
# these last two steps are required to ensure that targets do not create a session
# for an attacker using a stolen ticket
data_final = { 'nonce' : base64.b64encode(str(encryptAES(self.derived_key, r2)).encode()) }
req = request.Request(GW_ADDR+":"+GW_PORT+"/dhmSessionSetup/validation/", parse.urlencode(data_final).encode()) #######################
request.urlopen(req)
#response =... # is there a response to this message ?
#print("## Session established successfully with the target GW")
logger.info("Successfully established session with GW UUID: " + self.gw_id)
# now that the session is established the DHM sends authentication tokens for
# the GW to be able to locate the devices and authenticate itself towards them
# fetch DHs managed by this gateway
# and generate the respective access token
logger.info("Fetching DHs data to configure target GW UUID " + self.gw_id)
managed_dhs = []
auth_data = {} # authentication data containing the dh ticket and its respective session key with the gw encrypted with gw public key
for device in dh_list:
if(device['master_gw_uuid'] == self.gw_id):
managed_dhs.append(device)
ticket, key = authTicketGen(device, self.derived_key, self.gw_id)
gw_pub_key = [g for g in gw_list if g['uuid'] == self.gw_id][0]['pub_key']
#auth_data[device['uuid']] = [ ticket, encryptRSA(loadKey(gw_pub_key.encode()), key)]
enc_key, iv = encryptAES(key, self.derived_key)
#print("AES Encrypted Key")
#for b in enc_key:
# print(int(b))
#print("IV")
#for b in iv:
# print(int(b))
#print("KEY used in AES cipher")
#for b in self.derived_key:
# print(int(b))
auth_data[device['uuid']] = [ ticket, enc_key, iv ]
d = base64.b64encode(str(auth_data).encode())
# hmac is generated over the base 64 encode in order to avoid dictionary rearrangements/disparities
# at the gw endpoint, resulting in different hmacs
hmac = generateHMAC(self.derived_key, d)
request_data = {
'data' : d,
'signature' : base64.b64encode(hmac)
}
logger.info("Sending configuration tickets to target GW UUID " + self.gw_id)
req = request.Request(GW_ADDR+":"+GW_PORT+"/dhTickets/", parse.urlencode(request_data).encode()) ########################################################
response = request.urlopen(req).read().decode()
logger.info(response)
# thread blocks until receiving any new data
#job = self.queue.get()
#if job == "Session_renew":
# # do work
# elif job is None:
# break
return
#time.sleep(self.sleep_delta)
# thread responsible for managing the connections and sessions between the DHM and each A3C GW server
class DHM_A3C_Session(threading.Thread):
def __init__(self, a3c_uuid, addr, a3c_pub_key, target_gw_id, target_gw_addr, delta, sleep_delta):
threading.Thread.__init__(self)
self.a3c_uuid = a3c_uuid # uuid of the a3c server
self.a3c_addr = addr # url address for the a3c
self.a3c_pub_key = loadKey(a3c_pub_key) # public key of the a3c
self.target_gw_id = target_gw_id
self.target_gw_addr = target_gw_addr
self.delta = delta # duration of the session in seconds.
self.sleep_delta = sleep_delta # time interval between retrying to communicate with the A3C in case it failed before
self.r1 = os.urandom(16) # nonce to send to A3C
#self.session_key = None
def run(self):
while True:
try:
#print("# Fecthing ticket from A3C_GW...")
logger.info("Fetching ticket from A3C GW with uuid " + self.a3c_uuid)
req_body = {
'id' : self.target_gw_id, # id of the target gateway to be accessed
'public_key' : base64.b64encode(server_pub_key_pem),
'nonce' : base64.b64encode(encryptRSA(self.a3c_pub_key, self.r1))
}
data = parse.urlencode(req_body).encode()
req = request.Request(self.a3c_addr+"/ticketFetch/", data)
#response = request.urlopen(req).read().decode()
response = ast.literal_eval(request.urlopen(req).read().decode())
#print(response)
# recover nonce2
nonce2 = decryptRSA(server_priv_key,base64.b64decode(response['nonce']))
ticket = response['ticket']
a3c_public_key = base64.b64decode(response['public_key'])
# compute session key using the retrieved nonce
session_key = bytes(map(operator.xor, self.r1, nonce2))
#print("## Ticket fetched successfully!")
logger.info("Successfully fetched ticket from " + self.a3c_uuid)
# start DHM-GW connection and session establishment
dhm_gw_session_queue = Queue()
dhm_gw_session = DHM_GW_Session(dhm_gw_session_queue,self.target_gw_addr, self.target_gw_id, self.a3c_pub_key, session_key, ticket, self.sleep_delta)
dhm_gw_session.start()
return
#Once the session expires the thread must restart
# < TODO >
except requests.exceptions.ConnectionError:
print("# Failed to connect to server \n## Trying again in {} seconds".format(self.sleep_delta))
#except Exception as exc:
# print(exc)
# print("# An error ocurred while registering in the server\n## Trying again in {} seconds".format(self.sleep_delta))
time.sleep(self.sleep_delta)
return
def main():
# server password input
# it will be used to load the server private key
#server_password = bytes(getpass.getpass(),'utf-8')
server_password = b"<PASSWORD>!\"#$%&/()="
#print("# IOT DHM server starting...")
logger.info("DHM server starting")
# init database
logger.info("Loading database")
global db_client
db_client = pymongo.MongoClient("mongodb://localhost:27017/")
db = db_client[db_client_name]
crypt_col = db["crypto_info"]
logger.info("Fetching hashed password from database")
for d in crypt_col.find({}):
db_hash = d['key']
db_salt = d['salt']
if not db_hash:
logger.critical("Password not found in local database")
return
if not db_salt:
logger.critical("Password salt value not found in local database ")
return
# check if password matches the salted hash stored in database
key = PBKDF2(server_password, db_salt)
if(not verifyPKBDF2(key, server_password, db_salt)):
#print("ERROR: Invalid password!")
logger.critical("Provided bootstrap password does not match stored password")
return
else:
#print("## Correct password")
logger.info("Correct bootstrap password")
# load server key_pair from file
global server_pub_key_pem
    server_pub_key_pem = loadKeyPEM("rsa_dhm","public",password=server_password,path="")
if(server_pub_key_pem == -1):
logger.critical("Failed to load public key")
return
logger.info("Public key loaded successfully")
global server_priv_key
server_priv_key = loadKeyPEM("rsa_dhm","private",server_password,path="")
if(server_priv_key == -1):
logger.critical("Failed to load private key")
return
logger.info("Private key loaded successfully")
global server_pub_key
server_pub_key = loadKey(server_pub_key_pem)
#print("## Keys loaded successfully!")
# DHM id is equal to the digest of its public key
global server_id
server_id = digestMD5(server_pub_key_pem).hex()
#print("Server ID: " + server_id)
logger.info("Server ID : " + server_id)
## LOGIN procedure complete ##
# fetch info of DH this DHM manages from db
dh_col = db[col_dh]
global dh_list
for d in dh_col.find({}):
dh_list.append({
'uuid' : d['uuid'],
'pub_key' : d['pub_key'],
'ble_addr' : d['ble_addr'],
'master_gw_uuid' : d['master_gw_uuid']
})
#print(dh_list)
logger.info("Successfully loaded DHs info from local database")
# fetch GW info from db
gw_col = db[col_gw]
global gw_list
for d in gw_col.find({}):
gw_list.append({
'uuid' : d['uuid'],
'pub_key' : d['pub_key'],
'addr' : d['addr'],
'master_a3c_uuid' : d['master_a3c_uuid']
})
#print(gw_list)
logger.info("Successfully loaded GWs info from local database")
# fetch A3C GW info from database (address, uuid, public key)
a3c_gw_col = db[col_a3c_gw]
a3c_gw_list = []
for d in a3c_gw_col.find({}):
a3c_gw_list.append({
'uuid' : d['uuid'],
'pub_key' : d['pub_key'],
'addr' : d['addr']
})
#print(a3c_gw_list)
for device in dh_list:
target_gw_id = device['master_gw_uuid']
# get info of device gw
gw = [g for g in gw_list if g['uuid'] == target_gw_id ]
gw_a3c_server_uuid = gw[0]['master_a3c_uuid']
target_gw_addr = gw[0]['addr']
#print(target_gw_addr)
# get info of the master a3c of the target gw
a3c_gw = [a for a in a3c_gw_list if a['uuid'] == gw_a3c_server_uuid][0]
a3c_gw_address = a3c_gw['addr']
a3c_gw_pub_key = a3c_gw['pub_key']
## Start DHM-A3C session. # 1 session <=> 1 thread
logger.info("Starting session with GW A3C server: " + gw_a3c_server_uuid)
t = | |
#!/usr/bin/python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import time
import numpy as np
import pandas as pd
import os
from builtins import range
from PIL import Image
import grpc
# Drop "from src.core import" because the import hierarchy in proto files
# will be removed in Makefile.clients
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
from tensorrtserver.api import model_config_pb2
from tensorrtserver.api import request_status_pb2
from tensorrtserver.api import server_status_pb2
FLAGS = None
def _log(*args, **kwargs):
print("[Client]", *args, **kwargs)
def model_dtype_to_np(model_dtype):
if model_dtype == model_config_pb2.TYPE_BOOL:
        return np.bool_
elif model_dtype == model_config_pb2.TYPE_INT8:
return np.int8
elif model_dtype == model_config_pb2.TYPE_INT16:
return np.int16
elif model_dtype == model_config_pb2.TYPE_INT32:
return np.int32
elif model_dtype == model_config_pb2.TYPE_INT64:
return np.int64
elif model_dtype == model_config_pb2.TYPE_UINT8:
return np.uint8
elif model_dtype == model_config_pb2.TYPE_UINT16:
return np.uint16
elif model_dtype == model_config_pb2.TYPE_FP16:
return np.float16
elif model_dtype == model_config_pb2.TYPE_FP32:
return np.float32
elif model_dtype == model_config_pb2.TYPE_FP64:
return np.float64
return None
def parse_model(status, model_name, batch_size, verbose=False):
"""
Check the configuration of a model to make sure it meets the
requirements for an image classification network (as expected by
this client)
"""
server_status = status.server_status
if model_name not in server_status.model_status.keys():
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
if len(config.input) != 1:
raise Exception("expecting 1 input, got " + len(config.input))
if len(config.output) != 1:
raise Exception("expecting 1 output, got " + len(config.output))
input = config.input[0]
output = config.output[0]
if output.data_type != model_config_pb2.TYPE_FP32:
raise Exception("expecting output datatype to be TYPE_FP32, model '" +
model_name + "' output type is " +
model_config_pb2.DataType.Name(output.data_type))
# Output is expected to be a vector. But allow any number of
# dimensions as long as all but 1 is size 1 (e.g. { 10 }, { 1, 10
# }, { 10, 1, 1 } are all ok).
non_one_cnt = 0
for dim in output.dims:
if dim > 1:
non_one_cnt += 1
if non_one_cnt > 1:
raise Exception("expecting model output to be a vector")
# Model specifying maximum batch size of 0 indicates that batching
# is not supported and so the input tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# image instance is inferred at a time).
max_batch_size = config.max_batch_size
if max_batch_size == 0:
if batch_size != 1:
raise Exception("batching not supported for model '" + model_name + "'")
else: # max_batch_size > 0
if batch_size > max_batch_size:
raise Exception("expecting batch size <= " + max_batch_size +
" for model '" + model_name + "'")
# Model input must have 3 dims, either CHW or HWC
if len(input.dims) != 3:
raise Exception("expecting input to have 3 dimensions, model '" +
model_name + "' input has " << len(input.dims))
if ((input.format != model_config_pb2.ModelInput.FORMAT_NCHW) and
(input.format != model_config_pb2.ModelInput.FORMAT_NHWC)):
raise Exception("unexpected input format " + model_config_pb2.ModelInput.Format.Name(input.format) +
", expecting " +
model_config_pb2.ModelInput.Format.Name(model_config_pb2.ModelInput.FORMAT_NCHW) +
" or " +
model_config_pb2.ModelInput.Format.Name(model_config_pb2.ModelInput.FORMAT_NHWC))
if input.format == model_config_pb2.ModelInput.FORMAT_NHWC:
h = input.dims[0]
w = input.dims[1]
c = input.dims[2]
else:
c = input.dims[0]
h = input.dims[1]
w = input.dims[2]
output_size = 1
for dim in output.dims:
output_size = output_size * dim
output_size = output_size * np.dtype(model_dtype_to_np(output.data_type)).itemsize
return (input.name, output.name, c, h, w, input.format, model_dtype_to_np(input.data_type), output_size)
def preprocess(img, format, dtype, c, h, w, scaling):
"""
Pre-process an image to meet the size, type and format
requirements specified by the parameters.
"""
#np.set_printoptions(threshold='nan')
if c == 1:
sample_img = img.convert('L')
else:
sample_img = img.convert('RGB')
    resized_img = sample_img.resize((w, h), Image.BILINEAR)  # PIL expects (width, height)
resized = np.array(resized_img)
if resized.ndim == 2:
resized = resized[:,:,np.newaxis]
typed = resized.astype(dtype)
if scaling == 'INCEPTION':
scaled = (typed / 128) - 1
elif scaling == 'VGG':
if c == 1:
scaled = typed - np.asarray((128,), dtype=dtype)
else:
scaled = typed - np.asarray((123, 117, 104), dtype=dtype)
else:
scaled = typed
# Swap to CHW if necessary
if format == model_config_pb2.ModelInput.FORMAT_NCHW:
ordered = np.transpose(scaled, (2, 0, 1))
else:
ordered = scaled
# Channels are in RGB order. Currently model configuration data
# doesn't provide any information as to other channel orderings
# (like BGR) so we just assume RGB.
return ordered
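# Illustrative call (a sketch; the file name is a placeholder, and format/dtype/c/h/w come
# from parse_model() while FLAGS.scaling is parsed in the __main__ block further below):
#   img = Image.open("input.jpg")
#   image_data = preprocess(img, format, dtype, c, h, w, FLAGS.scaling)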
def postprocess(results, files, idx, batch_size, num_classes, verbose=False):
"""
Post-process results to show classifications.
"""
show_all = verbose or ((batch_size == 1) and (num_classes > 1))
if show_all:
if idx == 0:
print("Output probabilities:")
print("batch {}:".format(idx))
if len(results) != 1:
raise Exception("expected 1 result, got " + str(len(results)))
batched_result = results[0].batch_classes
if len(batched_result) != batch_size:
raise Exception("expected " + str(batch_size) +
" results, got " + str(len(batched_result)))
# For each result in the batch count the top prediction. Since we
# used the same image for every entry in the batch we expect the
# top prediction to be the same for each entry... but this code
# doesn't assume that.
counts = dict()
predictions = dict()
for (index, result) in enumerate(batched_result):
label = result.cls[0].label
if label not in counts:
counts[label] = 0
counts[label] += 1
predictions[label] = result.cls[0]
# If requested, print all the class results for the entry
if show_all:
if (index >= len(files)):
index = len(files) - 1
# Top 1, print compactly
if len(result.cls) == 1:
print("Image '{}': {} ({}) = {}".format(
files[index], result.cls[0].idx,
result.cls[0].label, result.cls[0].value))
else:
print("Image '{}':".format(files[index]))
for cls in result.cls:
print(" {} ({}) = {}".format(cls.idx, cls.label, cls.value))
# Summary
print("Prediction totals:")
for (label, cnt) in counts.items():
cls = predictions[label]
print("\tcnt={}\t({}) {}".format(cnt, cls.idx, cls.label))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-m', '--model-name', type=str, required=True,
help='Name of model')
parser.add_argument('-x', '--model-version', type=str, required=False,
help='Version of model. Default is to use latest version.')
parser.add_argument('-b', '--batch-size', type=int, required=False, default=1,
help='Batch size. Default is 1.')
parser.add_argument('-c', '--classes', type=int, required=False, default=1,
help='Number of class results to report. Default is 1.')
parser.add_argument('-s', '--scaling', type=str, choices=['NONE', 'INCEPTION', 'VGG'],
required=False, default='NONE',
help='Type of scaling to apply to image pixels. Default is NONE.')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8001',
help='Inference server URL. Default is localhost:8001.')
parser.add_argument('-p', '--preprocessed', type=str, required=False,
metavar='FILE', help='Write preprocessed image to specified file.')
parser.add_argument('--result-name', type=str, required=False,
help='Path to parquet file')
parser.add_argument('image_filename', type=str, nargs='?', default=None,
help='Input image.')
FLAGS = parser.parse_args()
# Create gRPC stub for communicating with the server
channel = grpc.insecure_channel(FLAGS.url)
grpc_stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)
# Prepare request for Status gRPC
request = grpc_service_pb2.StatusRequest(model_name=FLAGS.model_name)
# Call and receive response from Status gRPC
response = grpc_stub.Status(request)
# Make sure the model matches our requirements, and get some
# properties of the model that we need for preprocessing
input_name, output_name, c, h, w, format, dtype, output_size = parse_model(
response, FLAGS.model_name, FLAGS.batch_size, FLAGS.verbose)
# Prepare request for Infer gRPC
# The meta data part can be reused across requests
request = grpc_service_pb2.InferRequest()
request.model_name = FLAGS.model_name
if FLAGS.model_version is None:
| |
#!/usr/bin/env python
# coding: utf-8
# # A Tutorial on HashMap
# (In addition to having fun) We write programs to solve real world problems. Data structures help us in representing and efficiently manipulating the data associated with these problems.
#
# Let us see if we can use any of the data structures that we already know to solve the following problem
# ### The Problem Scenario
#
# In a class of students, store heights for each student.
# The problem in itself is very simple. We have the data of heights of each student. We want to store it so that next time someone asks for height of a student, we can easily return the value. But how can we store these heights?
#
# Obviously we can use a database and store these values. But, let's say we don't want to do that for now. We want to use a data structure to store these values as part of our program. For the sake of simplicity, our problem is limited to storing heights of students. But you can certainly imagine scenarios where you have to store such `key-value` pairs and later on, when someone gives you a `key`, you can efficiently return the corresponding `value`.
#
# The class diagram for HashMaps would look something like this.
# In[ ]:
class HashMap:
def __init__(self):
self.num_entries = 0
def put(self, key, value):
pass
def get(self, key):
pass
def size(self):
return self.num_entries
# ### Arrays
#
# Can we use arrays to store `key-value` pairs?
#
# We can certainly use one array to store the names of the students and use another array to store their corresponding heights at the corresponding indices.
#
# What will be the time complexity in this scenario?
#
# To obtain height of a student, say `Potter, Harry`, we will have to traverse the entire array and check if the value at a particular index matches `Potter, Harry`. Once we find the index in which this value is stored, we can use this index to obtain the height from the second array.
#
# Thus, because of this traversal, the complexity of the `get()` operation becomes $O(n)$. Even if we maintain a sorted array, the operation will not take less than $O(log(n))$ time.
#
# What happens if a student leaves a class? We will have to delete the entry corresponding to the student from both the arrays.
#
# This would require another traversal to find the index. And then we will have to shift our entire array to fill this gap. Again, the time complexity of operation becomes $O(n)$
#
#
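# The cell below is a minimal sketch of the two-array idea discussed above (the names and
# heights are made-up sample data); note how `get_height()` has to traverse the whole list:
# In[ ]:
names = ["Potter, Harry", "Granger, Hermione", "Weasley, Ron"]
heights = [168, 165, 170]
def get_height(name):
    # O(n): in the worst case every name is checked before the key is found (or missed)
    for index, candidate in enumerate(names):
        if candidate == name:
            return heights[index]
    return None
print(get_height("Potter, Harry"))  # 168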
# ### Linked List
#
# Is it possible to use linked lists for this problem?
#
# We can certainly modify our `LinkedListNode` to have two different value attributes - one for name of the student and the other for height.
#
# But we again face the same problem. In the worst case, we will have to traverse the entire linked list to find the height of a particular student. Once again, the cost of operation becomes $O(n)$.
# ### Stacks and Queues
#
# Stacks and Queues are LIFO and FIFO data structures respectively. Can you think why they too do not make a good choice for storing `key-value` pairs?
# ------------------------------------------------------------------------
# Can we do better? Can you think of any data structure that allows for fast `get()` operation?
#
# Let us circle back to arrays.
#
# When we obtain the element present at a particular index using something like `arr[3]`, the operation takes constant i.e. `O(1)` time.
#
# *For review - Does this constant time operation require further explanation?*
#
#
#
# If we think about `array indices as keys` and the `element present at those indices as values`, we can fairly conclude that at least for non-negative integer `keys`, we can use arrays.
#
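# As a quick illustration (a sketch with made-up values): for small non-negative integer
# keys, the key itself can serve as the array index, which is what makes lookups constant time.
# In[ ]:
heights_by_id = [None] * 10   # one slot per possible integer key 0..9
heights_by_id[3] = 168        # put(key=3, value=168)
print(heights_by_id[3])       # get(key=3) returns 168 in O(1) time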
# However, like our current problem statement, we may not always have integer keys.
#
# `If only we had a function that could give us array indices for any key value that we gave it!`
# ## Hash Functions
#
# Simply put, hash functions are these really incredible `magic` functions which can map data of any size to data of a fixed size. This fixed-size data is often called a hash code or hash digest.
#
# Let's create our own hash function to store strings
# In[ ]:
def hash_function(string):
pass
# For a given string, say `abcd`, a very simple hash function can be sum of corresponding ASCII values.
#
# *Note: you can use `ord(character)` to determine the ASCII value of a particular character, e.g. `ord('a')` will return 97*
#
# In[ ]:
def hash_function(string):
hash_code = 0
for character in string:
hash_code += ord(character)
return hash_code
# In[ ]:
hash_code_1 = hash_function("abcd")
print(hash_code_1)
# Looks like our hash function is working fine. But is this really a good hash function?
#
# For starters, it will return the same value for `abcd` and `bcda`. Do we want that? We can create 24 different permutations for the string `abcd` and each will have the same value. We cannot put 24 values at one index.
#
# Obviously, this makes it clear that our hash function must return unique values for unique objects.
#
# When two different inputs produce the same output, then we have something called a `collision`. An ideal hash function must be immune from producing collisions.
#
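# To see such a collision concretely with the `hash_function` defined above:
# In[ ]:
print(hash_function("abcd"))  # 394
print(hash_function("bcda"))  # 394 as well -- a collision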
# Let's think something else.
#
# Can taking the product of the ASCII values help? We will again run into the same problem.
#
# The honest answer is that we have different hash functions for different types of keys. The hash function for integers will be different from the hash function for strings, which again, will be different for some object of a class that you created.
#
# However, let's try to continue with our problem and try to come up with a hash function for strings.
# ## Hash Function for Strings
# For a string, say `abcde`, a very effective hash function treats the string as a number written in a prime base `p`.
# Let's elaborate this statement.
#
# For a number, say `578`, we can represent this number in base 10 number system as $$5*10^2 + 7*10^1 + 8*10^0$$
#
# Similarly, we can treat `abcde` in base `p` as $$a * p^4 + b * p^3 + c * p^2 + d * p^1 + e * p^0$$
#
# Here, we replace each character with its corresponding ASCII value.
#
# A lot of research goes into figuring out good hash functions and this hash function is one of the most popular functions used for strings. We use prime numbers because they provide a good distribution. The most common prime numbers used for this function are 31 and 37.
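# The cell below is a standalone sketch of this base-`p` hash (p = 37 follows the text;
# the HashMap class further below folds the same idea into its get_hash_code() method):
# In[ ]:
def string_hash(string, p=37):
    hash_code = 0
    coefficient = 1  # p^0 for the last character, p^1 for the one before it, and so on
    for character in reversed(string):
        hash_code += ord(character) * coefficient
        coefficient *= p
    return hash_code
print(string_hash("abcde"))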
# Thus, using this algorithm, we can get a corresponding integer value for each string key and **use it as an index** of an array, say `bucket array`. It is not a special array. We simply choose to give a special name to arrays for this purpose. Each entry in this `bucket array` is called a `bucket` and the index in which we store a bucket is called `bucket index`. You can visualize the `bucket array` as shown in the figure below:
#
# <img style="float: center;height:500px;" src="bucket0.png"><br>
#
# Let's add these details to our class.
# In[ ]:
class HashMap:
def __init__(self, initial_size=10):
self.bucket_array = [None for _ in range(initial_size)]
self.p = 37 # a prime numbers
self.num_entries = 0
def put(self, key, value):
pass
def get(self, key):
pass
# Returns the bucket_index
def get_bucket_index(self, key):
return self.get_hash_code(key) # The returned hash code will be the bucket_index
# Returns the hash code
def get_hash_code(self, key):
key = str(key)
# represents (self.p^0) which is 1
current_coefficient = | |
= start_point
continue
# Draw Line
match = self.draw_line_re.search(pline)
if match:
current_subpath = 'lines'
x = float(match.group(1)) + offset_geo[0]
y = float(match.group(2)) + offset_geo[1]
pt = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
subpath['lines'].append(pt)
current_point = pt
continue
# Draw Bezier 'c'
match = self.draw_arc_3pt_re.search(pline)
if match:
current_subpath = 'bezier'
start = current_point
x = float(match.group(1)) + offset_geo[0]
y = float(match.group(2)) + offset_geo[1]
c1 = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
x = float(match.group(3)) + offset_geo[0]
y = float(match.group(4)) + offset_geo[1]
c2 = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
x = float(match.group(5)) + offset_geo[0]
y = float(match.group(6)) + offset_geo[1]
stop = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
subpath['bezier'].append([start, c1, c2, stop])
current_point = stop
continue
# Draw Bezier 'v'
match = self.draw_arc_2pt_c1start_re.search(pline)
if match:
current_subpath = 'bezier'
start = current_point
x = float(match.group(1)) + offset_geo[0]
y = float(match.group(2)) + offset_geo[1]
c2 = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
x = float(match.group(3)) + offset_geo[0]
y = float(match.group(4)) + offset_geo[1]
stop = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
subpath['bezier'].append([start, start, c2, stop])
current_point = stop
continue
# Draw Bezier 'y'
match = self.draw_arc_2pt_c2stop_re.search(pline)
if match:
start = current_point
x = float(match.group(1)) + offset_geo[0]
y = float(match.group(2)) + offset_geo[1]
c1 = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
x = float(match.group(3)) + offset_geo[0]
y = float(match.group(4)) + offset_geo[1]
stop = (x * self.point_to_unit_factor * scale_geo[0],
y * self.point_to_unit_factor * scale_geo[1])
subpath['bezier'].append([start, c1, stop, stop])
current_point = stop
continue
# Draw Rectangle 're'
match = self.rect_re.search(pline)
if match:
current_subpath = 'rectangle'
x = (float(match.group(1)) + offset_geo[0]) * self.point_to_unit_factor * scale_geo[0]
y = (float(match.group(2)) + offset_geo[1]) * self.point_to_unit_factor * scale_geo[1]
width = (float(match.group(3)) + offset_geo[0]) * self.point_to_unit_factor * scale_geo[0]
height = (float(match.group(4)) + offset_geo[1]) * self.point_to_unit_factor * scale_geo[1]
pt1 = (x, y)
pt2 = (x + width, y)
pt3 = (x + width, y + height)
pt4 = (x, y + height)
subpath['rectangle'] += [pt1, pt2, pt3, pt4, pt1]
current_point = pt1
continue
# Detect clipping path set
# ignore this and delete the current subpath
match = self.clip_path_re.search(pline)
if match:
subpath['lines'] = []
subpath['bezier'] = []
subpath['rectangle'] = []
# it means that we've already added the subpath to path and we need to delete it
# clipping path is usually either rectangle or lines
if close_subpath is True:
close_subpath = False
if current_subpath == 'lines':
path['lines'].pop(-1)
if current_subpath == 'rectangle':
path['rectangle'].pop(-1)
continue
# Close SUBPATH
match = self.end_subpath_re.search(pline)
if match:
close_subpath = True
if current_subpath == 'lines':
subpath['lines'].append(start_point)
# since we are closing the subpath add it to the path, a path may have chained subpaths
path['lines'].append(copy(subpath['lines']))
subpath['lines'] = []
elif current_subpath == 'bezier':
# subpath['bezier'].append(start_point)
# since we are closing the subpath add it to the path, a path may have chained subpaths
path['bezier'].append(copy(subpath['bezier']))
subpath['bezier'] = []
elif current_subpath == 'rectangle':
# subpath['rectangle'].append(start_point)
# since we are closing the subpath add it to the path, a path may have chained subpaths
path['rectangle'].append(copy(subpath['rectangle']))
subpath['rectangle'] = []
continue
# PATH PAINTING #
# Detect Stroke width / aperture
match = self.strokewidth_re.search(pline)
if match:
size = float(match.group(1))
continue
# Detect No_Op command, ignore the current subpath
match = self.no_op_re.search(pline)
if match:
subpath['lines'] = []
subpath['bezier'] = []
subpath['rectangle'] = []
continue
# Stroke the path
match = self.stroke_path__re.search(pline)
if match:
# scale the size here; some PDF printers apply transformation after the size is declared
applied_size = size * scale_geo[0] * self.point_to_unit_factor
path_geo = []
if current_subpath == 'lines':
if path['lines']:
for subp in path['lines']:
geo = copy(subp)
try:
geo = LineString(geo).buffer((float(applied_size) / 2),
resolution=self.step_per_circles)
path_geo.append(geo)
except ValueError:
pass
# the path was painted therefore initialize it
path['lines'] = []
else:
geo = copy(subpath['lines'])
try:
geo = LineString(geo).buffer((float(applied_size) / 2), resolution=self.step_per_circles)
path_geo.append(geo)
except ValueError:
pass
subpath['lines'] = []
if current_subpath == 'bezier':
if path['bezier']:
for subp in path['bezier']:
geo = []
for b in subp:
geo += self.bezier_to_points(start=b[0], c1=b[1], c2=b[2], stop=b[3])
try:
geo = LineString(geo).buffer((float(applied_size) / 2),
resolution=self.step_per_circles)
path_geo.append(geo)
except ValueError:
pass
# the path was painted therefore initialize it
path['bezier'] = []
else:
geo = []
for b in subpath['bezier']:
geo += self.bezier_to_points(start=b[0], c1=b[1], c2=b[2], stop=b[3])
try:
geo = LineString(geo).buffer((float(applied_size) / 2), resolution=self.step_per_circles)
path_geo.append(geo)
except ValueError:
pass
subpath['bezier'] = []
if current_subpath == 'rectangle':
if path['rectangle']:
for subp in path['rectangle']:
geo = copy(subp)
try:
geo = LineString(geo).buffer((float(applied_size) / 2),
resolution=self.step_per_circles)
path_geo.append(geo)
except ValueError:
pass
# the path was painted therefore initialize it
path['rectangle'] = []
else:
geo = copy(subpath['rectangle'])
try:
geo = LineString(geo).buffer((float(applied_size) / 2), resolution=self.step_per_circles)
path_geo.append(geo)
except ValueError:
pass
subpath['rectangle'] = []
# store the found geometry
found_aperture = None
if apertures_dict:
for apid in apertures_dict:
# if we already have an aperture with the current size (rounded to 5 decimals)
if apertures_dict[apid]['size'] == round(applied_size, 5):
found_aperture = apid
break
if found_aperture:
for pdf_geo in path_geo:
if isinstance(pdf_geo, MultiPolygon):
for poly in pdf_geo:
new_el = {'solid': poly, 'follow': poly.exterior}
apertures_dict[copy(found_aperture)]['geometry'].append(deepcopy(new_el))
else:
new_el = {'solid': pdf_geo, 'follow': pdf_geo.exterior}
apertures_dict[copy(found_aperture)]['geometry'].append(deepcopy(new_el))
else:
if str(aperture) in apertures_dict.keys():
aperture += 1
apertures_dict[str(aperture)] = {}
apertures_dict[str(aperture)]['size'] = round(applied_size, 5)
apertures_dict[str(aperture)]['type'] = 'C'
apertures_dict[str(aperture)]['geometry'] = []
for pdf_geo in path_geo:
if isinstance(pdf_geo, MultiPolygon):
for poly in pdf_geo:
new_el = {'solid': poly, 'follow': poly.exterior}
apertures_dict[str(aperture)]['geometry'].append(deepcopy(new_el))
else:
new_el = {'solid': pdf_geo, 'follow': pdf_geo.exterior}
apertures_dict[str(aperture)]['geometry'].append(deepcopy(new_el))
else:
apertures_dict[str(aperture)] = {}
apertures_dict[str(aperture)]['size'] = round(applied_size, 5)
apertures_dict[str(aperture)]['type'] = 'C'
apertures_dict[str(aperture)]['geometry'] = []
for pdf_geo in path_geo:
if isinstance(pdf_geo, MultiPolygon):
for poly in pdf_geo:
new_el = {'solid': poly, 'follow': poly.exterior}
apertures_dict[str(aperture)]['geometry'].append(deepcopy(new_el))
else:
new_el = {'solid': pdf_geo, 'follow': pdf_geo.exterior}
apertures_dict[str(aperture)]['geometry'].append(deepcopy(new_el))
continue
# Fill the path
match = self.fill_path_re.search(pline)
if match:
# scale the size here; some PDF printers apply transformation after the size is declared
applied_size = size * scale_geo[0] * self.point_to_unit_factor
path_geo = []
if current_subpath == 'lines':
if path['lines']:
for subp in path['lines']:
geo = copy(subp)
# close the subpath if it was not closed already
if close_subpath is False:
geo.append(geo[0])
try:
geo_el = Polygon(geo).buffer(0.0000001, resolution=self.step_per_circles)
path_geo.append(geo_el)
except ValueError:
pass
# the path was painted therefore initialize it
path['lines'] = []
else:
geo = copy(subpath['lines'])
# close the subpath if it was not closed already
if close_subpath is False:
geo.append(start_point)
try:
geo_el = Polygon(geo).buffer(0.0000001, resolution=self.step_per_circles)
path_geo.append(geo_el)
except ValueError:
pass
subpath['lines'] = []
if current_subpath == 'bezier':
geo = []
if path['bezier']:
for subp in path['bezier']:
for b in subp:
geo += self.bezier_to_points(start=b[0], c1=b[1], c2=b[2], stop=b[3])
# close the subpath if it was not closed already
if close_subpath is False:
new_g = geo[0]
geo.append(new_g)
try:
geo_el = Polygon(geo).buffer(0.0000001, resolution=self.step_per_circles)
path_geo.append(geo_el)
except ValueError:
pass
# the path was painted therefore initialize it
path['bezier'] = []
else:
for b in subpath['bezier']:
geo += self.bezier_to_points(start=b[0], c1=b[1], c2=b[2], stop=b[3])
if close_subpath is False:
geo.append(start_point)
try:
geo_el = Polygon(geo).buffer(0.0000001, resolution=self.step_per_circles)
path_geo.append(geo_el)
except ValueError:
pass
subpath['bezier'] = []
if current_subpath == 'rectangle':
if path['rectangle']:
for subp in path['rectangle']:
geo = copy(subp)
# # close the subpath if it was not closed already
# if close_subpath is False and start_point is not None:
# geo.append(start_point)
try:
geo_el = Polygon(geo).buffer(0.0000001, resolution=self.step_per_circles)
path_geo.append(geo_el)
except ValueError:
pass
# the path was painted therefore initialize it
path['rectangle'] = []
else:
geo = copy(subpath['rectangle'])
# # close the subpath if it was not closed already
# if close_subpath is False and start_point is not None:
# geo.append(start_point)
try:
geo_el = Polygon(geo).buffer(0.0000001, resolution=self.step_per_circles)
path_geo.append(geo_el)
except ValueError:
pass
subpath['rectangle'] = []
# we finished painting and also closed the path if it was the case
close_subpath = True
# in case a color change to white (transparent) occurred
if flag_clear_geo is True:
# if there was a fill color change we look for circular geometries from which we can make
# drill holes for the Excellon file
if current_subpath == 'bezier':
# if there | |
# Set the new value
if self.myProperties[name]['multivalue']:
# Check if the new value is a list.
if type(value) != list:
raise TypeError(C.make_error('ATTRIBUTE_INVALID_LIST', name))
new_value = value
else:
new_value = [value]
# Fix up the value from an incoming JSON string if necessary
s_type = self.myProperties[name]['type']
try:
new_value = self._objectFactory.getAttributeTypes()[s_type].fixup(new_value)
except Exception:
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Check if the new value is valid
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[s_type].is_valid_value(new_value):
raise TypeError(C.make_error('ATTRIBUTE_INVALID', name, type=s_type))
# Validate value
if self.myProperties[name]['validator']:
props_copy = copy.deepcopy(self.myProperties)
res, error = self.__processValidator(self.myProperties[name]['validator'], name, new_value, props_copy)
if not res:
if len(error):
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED',
name, details=error))
else:
raise ValueError(C.make_error('ATTRIBUTE_CHECK_FAILED', name))
# Ensure that unique values stay unique. Let the backend test this.
#if self.myProperties[name]['unique']:
# backendI = ObjectBackendRegistry.getBackend(self.myProperties[name]['backend'])
# if not backendI.is_uniq(name, new_value):
# raise ObjectException(C.make_error('ATTRIBUTE_NOT_UNIQUE', name, value=value))
# Assign the property's new value.
self.myProperties[name]['value'] = new_value
self.log.debug("updated property value of [%s|%s] %s:%s" % (type(self).__name__, self.uuid, name, new_value))
# Update status if there's a change
t = self.myProperties[name]['type']
current = copy.deepcopy(self.myProperties[name]['value'])
#pylint: disable=E1101
if not self._objectFactory.getAttributeTypes()[t].values_match(self.myProperties[name]['value'], self.myProperties[name]['orig_value']):
self.myProperties[name]['status'] = STATUS_CHANGED
self.myProperties[name]['last_value'] = current
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def __getattr__(self, name):
"""
The getter method for object attributes.
(It differentiates between object attributes and class-members)
"""
methods = getattr(self, '__methods')
# If the requested property exists in the object-attributes, then return it.
if name in self.myProperties:
# We can have single- and multi-value attributes; return the correct type here.
value = None
if self.myProperties[name]['multivalue']:
value = self.myProperties[name]['value']
else:
if len(self.myProperties[name]['value']):
value = self.myProperties[name]['value'][0]
return value
# The requested property-name seems to be a method, return the method reference.
elif name in methods:
def m_call(*args, **kwargs):
return methods[name]['ref'](self, *args, **kwargs)
return m_call
else:
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def getTemplate(self, theme="default"):
"""
Return the template data - if any. Else None.
"""
return Object.getNamedTemplate(self.env, self._templates, theme)
@staticmethod
def getNamedTemplate(env, templates, theme="default"):
"""
Return the template data - if any. Else None.
"""
ui = []
# If there's a template file, try to find it
if templates:
for template in templates:
path = None
# Absolute path
if template.startswith(os.path.sep):
path = template
# Relative path
else:
# Find path
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', theme, template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', theme, template)
if not os.path.exists(path):
path = pkg_resources.resource_filename('clacks.agent', os.path.join('data', 'templates', "default", template)) #@UndefinedVariable
if not os.path.exists(path):
path = os.path.join(env.config.getBaseDir(), 'templates', "default", template)
if not os.path.exists(path):
return None
with open(path, "r") as f:
_ui = f.read()
# Build new merged resource element
root = etree.fromstring(_ui)
new_resources = []
resources = root.find("resources")
for include in resources.findall("include"):
rc = include.get("location")
location = os.path.join(os.path.dirname(path), rc)
if not os.path.exists(location):
raise IOError(C.make_error("NO_SUCH_RESOURCE", resource=location))
res = ""
with open(location, "r") as f:
res = f.read()
for resource in etree.fromstring(res).findall("qresource"):
files = []
prefix = resource.get("prefix")
for f in resource.findall("file"):
files.append(E.file(os.path.join(prefix, unicode(f.text))))
new_resources.append(E.resource(*files, location=rc))
root.replace(root.find("resources"), E.resources(*new_resources))
ui.append(etree.tostring(root))
return ui
def getAttrType(self, name):
"""
Return the type of a given object attribute.
"""
if name in self.myProperties:
return self.myProperties[name]['type']
raise AttributeError(C.make_error('ATTRIBUTE_NOT_FOUND', name))
def check(self, propsFromOtherExtensions=None):
"""
Checks whether everything is fine with the extension and its given values or not.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Check if _mode matches with the current object type
#pylint: disable=E1101
if self._base_object and not self._mode in ['create', 'remove', 'update']:
raise ObjectException(C.make_error('OBJECT_MODE_NOT_AVAILABLE', mode=self._mode))
if not self._base_object and self._mode in ['create', 'remove']:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
# Check if we are allowed to create this base object on the given base
if self._base_object and self._mode == "create":
base_type = self.get_object_type_by_dn(self.dn)
if not base_type:
raise ObjectException(C.make_error('OBJECT_MODE_BASE_AVAILABLE', mode=self._mode))
if self.__class__.__name__ not in self._objectFactory.getAllowedSubElementsForObject(base_type):
raise ObjectException(C.make_error('OBJECT_NOT_SUB_FOR',
ext=self.__class__.__name__,
base=base_type))
# Transfer values from other commit processes into ourselves
for key in self.attributesInSaveOrder:
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Transfer status into commit status
props[key]['commit_status'] = props[key]['status']
# Collect values by store and process the property filters
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Check if this attribute is blocked by another attribute and its value.
is_blocked = False
for bb in props[key]['blocked_by']:
if bb['value'] in props[bb['name']]['value']:
is_blocked = True
break
# Check if all required attributes are set. (Skip blocked ones; they cannot be set!)
if not is_blocked and props[key]['mandatory'] and not len(props[key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', key))
# Process each and every out-filter with a clean set of input values,
# so that return values do not overwrite one another.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Ensure that mandatory values are set
if props[prop_key]['mandatory'] and not len(props[prop_key]['value']):
raise ObjectException(C.make_error('ATTRIBUTE_MANDATORY', prop_key))
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
return props
def commit(self, propsFromOtherExtensions=None):
"""
Commits changes of an object to the corresponding backends.
"""
if not propsFromOtherExtensions:
propsFromOtherExtensions = {}
self.check(propsFromOtherExtensions)
self.log.debug("saving object modifications for [%s|%s]" % (type(self).__name__, self.uuid))
# Create a copy to avoid touching the original values
props = copy.deepcopy(self.myProperties)
# Transfer status into commit status
for key in self.attributesInSaveOrder:
props[key]['commit_status'] = props[key]['status']
# Transfer values from other commit processes into ourselves
if props[key]['foreign'] and key in propsFromOtherExtensions:
props[key]['value'] = propsFromOtherExtensions[key]['value']
# Adapt property states
# Run this once - If any state was adapted, then run again to ensure
# that all dependencies are processed.
first = True
_max = 5
required = False
while (first or required) and _max:
first = False
required = False
_max -= 1
for key in self.attributesInSaveOrder:
# Adapt status from dependent properties.
for propname in props[key]['depends_on']:
old = props[key]['commit_status']
props[key]['commit_status'] |= props[propname]['status'] & STATUS_CHANGED
props[key]['commit_status'] |= props[propname]['commit_status'] & STATUS_CHANGED
if props[key]['commit_status'] != old:
required = True
# Collect values by store and process the property filters
collectedAttrs = {}
for key in self.attributesInSaveOrder:
# Skip foreign properties
if props[key]['foreign']:
continue
# Do not save untouched values
if not props[key]['commit_status'] & STATUS_CHANGED:
continue
# Get the new value for the property and execute the out-filter
self.log.debug("changed: %s" % (key,))
# Process each and every out-filter with a clean set of input values,
# so that return values do not overwrite one another.
if len(props[key]['out_filter']):
self.log.debug(" found %s out-filter for %s" % (str(len(props[key]['out_filter'])), key,))
for out_f in props[key]['out_filter']:
self.__processFilter(out_f, key, props)
# Collect properties by backend
for prop_key in self.attributesInSaveOrder:
# Skip foreign properties
if props[prop_key]['foreign']:
continue
# Do not save untouched values
if not props[prop_key]['commit_status'] & STATUS_CHANGED:
continue
collectedAttrs[prop_key] = props[prop_key]
# Create a backend compatible list of all changed attributes.
toStore = {}
for prop_key in collectedAttrs:
# Collect properties by backend
for be in props[prop_key]['backend']:
if not be in toStore:
toStore[be] = {}
# Convert the property's type to the required format - if it's not of the expected type.
be_type = collectedAttrs[prop_key]['backend_type']
s_type = collectedAttrs[prop_key]['type']
if not self._objectFactory.getAttributeTypes()[be_type].is_valid_value(collectedAttrs[prop_key]['value']):
collectedAttrs[prop_key]['value'] = self._objectFactory.getAttributeTypes()[s_type].convert_to(
be_type, collectedAttrs[prop_key]['value'])
# Append entry to the to-be-stored list
toStore[be][prop_key] = {'foreign': collectedAttrs[prop_key]['foreign'],
'orig': collectedAttrs[prop_key]['in_value'],
'value': collectedAttrs[prop_key]['value'],
'type': collectedAttrs[prop_key]['backend_type']}
# We may have a plugin without any attributes, like the group asterisk extension; in
# this case we have to update the object despite the lack of properties.
if not len(toStore) and self._backend:
toStore[self._backend] = {}
# Leave the show if there's nothing to do
tmp = {}
for key, value in toStore.items():
# Skip NULL backend. Nothing to save, anyway.
if key == "NULL":
continue
tmp[key] = value
toStore = tmp
# Skip the whole process if there's no change at all
if not toStore:
return {}
# Update references using the toStore information
changes = {}
for be in toStore:
changes.update(toStore[be])
self.update_refs(changes)
# Handle by backend
p_backend = getattr(self, '_backend')
obj = self
zope.event.notify(ObjectChanged("pre %s" % self._mode, obj))
# Call pre-hooks now
if self._mode in ["extend", "create"]:
self.__execute_hook("PreCreate")
if self._mode in ["update"]:
self.__execute_hook("PreModify")
# First, take care of the primary backend...
| |
import pickle
import re
import time
from pathlib import Path, PurePath
import ipywidgets as widgets
import numpy as np
import pandas as pd
import requests
from IPython.display import display, clear_output
from rank_bm25 import BM25Okapi
from requests import HTTPError
from cord.core import render_html, show_common, describe_dataframe, is_kaggle, CORD_CHALLENGE_PATH, \
find_data_dir, SARS_DATE, SARS_COV_2_DATE, listify
from cord.dates import add_date_diff
from cord.jsonpaper import load_json_paper, PDF_JSON, PMC_JSON, \
get_json_paths
from cord.text import preprocess, shorten, summarize
from cord.vectors import show_2d_chart, similar_papers
_MINIMUM_SEARCH_SCORE = 2
def get(url, timeout=6):
try:
r = requests.get(url, timeout=timeout)
return r.text
except requests.exceptions.ConnectionError:
print(f'Cannot connect to {url}')
print(f'Remember to turn Internet ON in the Kaggle notebook settings')
except HTTPError:
print('Got http error', r.status_code, r.text)
_DISPLAY_COLS = ['sha', 'title', 'abstract', 'publish_time', 'authors', 'has_text']
_RESEARCH_PAPERS_SAVE_FILE = 'ResearchPapers.pickle'
_COVID = ['sars-cov-2', '2019-ncov', 'covid-19', 'covid-2019', 'wuhan', 'hubei', 'coronavirus']
# Convert the doi to a url
def doi_url(d):
if not d:
return '#'
return f'http://{d}' if d.startswith('doi.org') else f'http://doi.org/{d}'
_abstract_terms_ = '(Publisher|Abstract|Summary|BACKGROUND|INTRODUCTION)'
# Some titles are short and unrelated to viruses
# This regex keeps some short titles if they seem relevant
_relevant_re_ = '.*vir.*|.*sars.*|.*mers.*|.*corona.*|.*ncov.*|.*immun.*|.*nosocomial.*'
_relevant_re_ = _relevant_re_ + '.*epidem.*|.*emerg.*|.*vacc.*|.*cytokine.*'
def remove_common_terms(abstract):
return re.sub(_abstract_terms_, '', abstract)
def start(data):
return data.copy()
def clean_title(data):
# Set junk titles to NAN
title_relevant = data.title.fillna('').str.match(_relevant_re_, case=False)
title_short = data.title.fillna('').apply(len) < 30
title_junk = title_short & ~title_relevant
data.loc[title_junk, 'title'] = ''
return data
def clean_abstract(data):
# Set unknowns to NAN
abstract_unknown = data.abstract == 'Unknown'
data.loc[abstract_unknown, 'abstract'] = np.nan
# Fill missing abstract with the title
data.abstract = data.abstract.fillna(data.title)
# Remove common terms like publisher
data.abstract = data.abstract.fillna('').apply(remove_common_terms)
# Remove the abstract if it is too common
common_abstracts = show_common(data, 'abstract').query('abstract > 2') \
.reset_index().query('~(index =="")')['index'].tolist()
data.loc[data.abstract.isin(common_abstracts), 'abstract'] = ''
return data
def drop_missing(data):
missing = (data.published.isnull()) & \
(data.sha.isnull()) & \
(data.title == '') & \
(data.abstract == '')
return data[~missing].reset_index(drop=True)
def fill_nulls(data):
data.authors = data.authors.fillna('')
data.doi = data.doi.fillna('')
data.journal = data.journal.fillna('')
data.abstract = data.abstract.fillna('')
return data
def rename_publish_time(data):
return data.rename(columns={'publish_time': 'published'})
COVID_TERMS = ['covid', 'sars-?n?cov-?2', '2019-ncov', 'novel coronavirus', 'sars coronavirus 2']
COVID_SEARCH = f".*({'|'.join(COVID_TERMS)})"
NOVEL_CORONAVIRUS = '.*novel coronavirus'
WUHAN_OUTBREAK = 'wuhan'
def tag_covid(data):
"""
Tag all the records that match covid
:param data:
:return: data
"""
abstract = data.abstract.fillna('')
since_covid = (data.published > SARS_COV_2_DATE) | (data.published.isnull())
covid_term_match = since_covid & abstract.str.match(COVID_SEARCH, case=False)
wuhan_outbreak = since_covid & abstract.str.match('.*(wuhan|hubei)', case=False)
covid_match = covid_term_match | wuhan_outbreak
data['covid_related'] = False
data.loc[covid_match, 'covid_related'] = True
return data
def tag_virus(data):
VIRUS_SEARCH = f".*(virus|viruses|viral)"
viral_cond = data.abstract.str.match(VIRUS_SEARCH, case=False)
data['virus'] = False
data.loc[viral_cond, 'virus'] = True
return data
def tag_coronavirus(data):
corona_cond = data.abstract.str.match(".*corona", case=False)
data['coronavirus'] = False
data.loc[corona_cond, 'coronavirus'] = True
return data
def tag_sars(data):
sars_cond = data.abstract.str.match(".*sars", case=False)
sars_not_covid = ~(data.covid_related) & (sars_cond)
data['sars'] = False
data.loc[sars_not_covid, 'sars'] = True
return data
def apply_tags(data):
print('Applying tags to metadata')
data = data.pipe(tag_covid) \
.pipe(tag_virus) \
.pipe(tag_coronavirus) \
.pipe(tag_sars)
return data
def clean_metadata(metadata):
print('Cleaning metadata')
return metadata.pipe(start) \
.pipe(clean_title) \
.pipe(clean_abstract) \
.pipe(rename_publish_time) \
.pipe(add_date_diff) \
.pipe(drop_missing) \
.pipe(fill_nulls) \
.pipe(apply_tags)
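# Hedged illustration (not part of the original module): the cleaning steps above are plain
# DataFrame-in/DataFrame-out functions, so they can be exercised individually or chained with
# pandas `.pipe`, exactly as clean_metadata does for the full metadata file. The two-row frame
# below is hypothetical and only carries the columns these particular steps touch.
def _clean_metadata_pipe_sketch():
    toy = pd.DataFrame({'title': ['Short note', 'SARS coronavirus entry mechanisms in human cells'],
                        'abstract': ['We study influenza vaccines.', 'We study spike protein binding.']})
    # A single step applied on its own ...
    titled = clean_title(toy.copy())
    # ... or several steps chained, mirroring the clean_metadata pipeline.
    chained = toy.copy().pipe(clean_title).pipe(tag_virus)
    return titled, chained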
def get_json_path(data_path, full_text_file, sha, pmcid):
if pmcid and isinstance(pmcid, str):
return Path(data_path) / full_text_file / full_text_file / PMC_JSON / f'{pmcid}.xml.json'
elif sha and isinstance(sha, str):
return Path(data_path) / full_text_file / full_text_file / PDF_JSON / f'{sha}.json'
def get_pdf_json_path(data_path, full_text_file, sha):
"""
:return: The path to the json file if the sha is present
"""
if sha and isinstance(sha, str):
return Path(data_path) / full_text_file / full_text_file / PDF_JSON / f'{sha}.json'
def get_pmcid_json_path(data_path, full_text_file, pmcid):
"""
:return: the path to the json file if the pmcid json is available
"""
if pmcid and isinstance(pmcid, str):
return Path(data_path) / full_text_file / full_text_file / PMC_JSON / f'{pmcid}.xml.json'
def _get_bm25Okapi(index_tokens):
has_tokens = index_tokens.apply(len).sum() > 0
if not has_tokens:
index_tokens.loc[0] = ['no', 'tokens']
return BM25Okapi(index_tokens.tolist())
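# Hedged usage sketch: how the index returned by _get_bm25Okapi is typically queried via the
# rank_bm25 API (get_scores / get_top_n). The three-document corpus is hypothetical; in the
# real class the tokens come from `preprocess`-ed abstracts or paper contents.
def _bm25_query_sketch():
    corpus_tokens = pd.Series([['novel', 'coronavirus', 'outbreak'],
                               ['influenza', 'vaccine', 'trial'],
                               ['sars', 'cov', 'spike', 'protein']])
    bm25 = _get_bm25Okapi(corpus_tokens)
    query_tokens = ['coronavirus', 'spike']
    scores = bm25.get_scores(query_tokens)                        # one BM25 score per document
    top_docs = bm25.get_top_n(query_tokens, corpus_tokens.tolist(), n=2)
    return scores, top_docs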
def _set_index_from_text(metadata, data_path):
from gensim.corpora import Dictionary
from cord.jsonpaper import get_json_cache_dir
print('Creating the BM25 index from the text contents of the papers')
json_cache_dir = get_json_cache_dir()
file_paths = [PurePath(p) for p in json_cache_dir.glob(f'jsoncache_*.pq')]
for cache_path in file_paths:
print('Loading json cache file', cache_path.stem)
json_cache = pd.read_parquet(cache_path)
part_no = cache_path.stem[len('jsoncache_'):]
dictionary_path = json_cache_dir / f'jsoncache_{part_no}.dict'
dictionary = Dictionary.load((str(dictionary_path.resolve())))
json_cache['index_tokens'] \
= json_cache.token_int.apply(lambda token_int: [dictionary[ti] for ti in token_int])
json_tokens = json_cache.drop(columns=['token_int']).set_index('cord_uid')
token_lookup = json_tokens.to_dict()['index_tokens']
if 'index_tokens' not in metadata:
metadata['index_tokens'] = metadata['cord_uid'].apply(lambda c: token_lookup.get(c, np.nan))
else:
need_tokens = metadata.index_tokens.isnull()
metadata.loc[need_tokens, 'index_tokens'] = \
metadata.loc[need_tokens, 'cord_uid'].apply(lambda c: token_lookup.get(c, np.nan))
# If the index tokens are still null .. use the abstracts
null_tokens = metadata.index_tokens.isnull()
print('There are', null_tokens.sum(), 'papers that will be indexed using the abstract instead of the contents')
metadata.loc[null_tokens, 'index_tokens'] = metadata.loc[null_tokens].abstract.apply(preprocess)
missing_index_tokens = len(metadata.loc[metadata.index_tokens.isnull()])
if missing_index_tokens > 0:
print('There still are', missing_index_tokens, 'index tokens')
return metadata
def create_annoy_index(document_vectors):
print('Creating Annoy document index')
tick = time.time()
from annoy import AnnoyIndex
annoy_index = AnnoyIndex(20, 'angular')
for i in range(len(document_vectors)):
annoy_index.add_item(i, document_vectors.loc[i])
annoy_index.build(10)  # Annoy requires a tree count; 10 trees is an assumed default here
tock = time.time()
print('Finished creating Annoy document index in', round(tock - tick, 0), 'seconds')
return annoy_index
class ResearchPapers:
def __init__(self, metadata: pd.DataFrame, bm25_index: BM25Okapi = None, data_dir='data', index='abstract',
view='html'):
self.data_path = Path(data_dir)
self.num_results = 10
self.view = view
self.metadata = metadata
if bm25_index is None:
if 'index_tokens' not in metadata:
print('\nIndexing research papers')
if any([index == t for t in ['text', 'texts', 'content', 'contents']]):
tick = time.time()
_set_index_from_text(self.metadata, data_dir)
print("Finished indexing in", int(time.time() - tick), 'seconds')
else:
print('Creating the BM25 index from the abstracts of the papers')
print('Use index="text" if you want to index the texts of the paper instead')
tick = time.time()
self.metadata['index_tokens'] = metadata.abstract.apply(preprocess)
tock = time.time()
print('Finished Indexing in', round(tock - tick, 0), 'seconds')
if 'antivirals' not in self.metadata:
# Add antiviral column
self.metadata['antivirals'] = self.metadata.index_tokens \
.apply(lambda t:
','.join([token for token in t if token.endswith('vir')]))
# Create BM25 search index
self.bm25 = _get_bm25Okapi(self.metadata.index_tokens)
else:
self.bm25 = bm25_index
@staticmethod
def load_metadata(data_path=None):
if not data_path:
data_path = find_data_dir()
print('Loading metadata from', data_path)
metadata_path = PurePath(data_path) / 'metadata.csv'
dtypes = {'Microsoft Academic Paper ID': 'str', 'pubmed_id': str}
renames = {'source_x': 'source', 'has_full_text': 'has_text'}
metadata = pd.read_csv(metadata_path, dtype=dtypes, low_memory=False,
parse_dates=['publish_time']).rename(columns=renames)
metadata = clean_metadata(metadata)
return metadata
@classmethod
def load(cls, data_dir=None, index=None):
if data_dir:
data_path = Path(data_dir) / CORD_CHALLENGE_PATH
else:
data_path = find_data_dir()
metadata = cls.load_metadata(data_path)
return cls(metadata, data_dir=data_path, index=index)
@classmethod
def restore(cls, storage_path='storage'):
tick = time.time()
index_path = Path(storage_path) / 'BM25IndexAbstracts.pq'
with index_path.open('rb') as f:
index = pickle.load(f)
metadata = pd.read_parquet(PurePath(storage_path) / 'MetadataAbstracts.pq')
papers = cls(metadata=metadata, bm25_index=index)
print('Loaded papers in', time.time() - tick, 'seconds')
return papers
def show_similar(self, paper_id):
similar_paper_ids = similar_papers(paper_id)
self.display(*similar_paper_ids)
def similar_to(self, paper_id):
"""
Find and displays papers similar to the paper
:param paper_id: the cord_uid
:return: None
"""
similar_paper_ids = similar_papers(paper_id)
original_paper = self[paper_id]
style = 'color: #008B8B; font-weight: bold; font-size: 0.9em;'
display(widgets.HTML(
f'<h4>Papers similar to <span style="{style}">{original_paper.title}</span></h4>'))
return self.display(*similar_paper_ids)
def show(self, *paper_ids):
return self.display(*paper_ids)
def display(self, *paper_ids):
if len(paper_ids) == 1:
paper_ids = listify(paper_ids[0])
_recs = []
for id in paper_ids:
paper = self[id]
_recs.append({'published': paper.metadata.published,
'title': paper.title,
'summary': paper.summary,
'when': paper.metadata.when,
'cord_uid': paper.cord_uid})
df = pd.DataFrame(_recs).sort_values(['published'], ascending=False).drop(columns=['published'])
def highlight_cols(s):
return 'font-size: 1.1em; color: #008B8B; font-weight: bold'
return df.style.applymap(highlight_cols, subset=pd.IndexSlice[:, ['title']]).hide_index()
def create_document_index(self):
print('Indexing research papers')
tick = time.time()
index_tokens = self._create_index_tokens()
# Add antiviral column
self.metadata['antivirals'] = index_tokens.apply(lambda t:
','.join([token for token in t if token.endswith('vir')]))
# Does it have any covid term?
self.bm25 = BM25Okapi(index_tokens.tolist())
tock = time.time()
print('Finished Indexing in', round(tock - tick, 0), 'seconds')
def get_json_paths(self):
return get_json_paths(self.metadata, self.data_path)
def describe(self):
cols = [col for col in self.metadata if not col in ['sha', 'index_tokens']]
return describe_dataframe(self.metadata, cols)
def __getitem__(self, item):
if isinstance(item, int):
paper = self.metadata.iloc[item]
else:
paper = self.metadata[self.metadata.cord_uid == item]
return Paper(paper, self.data_path)
def covid_related(self):
return self.query('covid_related')
def not_covid_related(self):
return self.query('~covid_related')
def __len__(self):
return len(self.metadata)
def _make_copy(self, new_data):
return ResearchPapers(metadata=new_data.copy(),
data_dir=self.data_path,
view=self.view)
def query(self, query):
data = self.metadata.query(query)
return self._make_copy(data)
def after(self, date, include_null_dates=False):
cond = self.metadata.published >= date
if include_null_dates:
cond = cond | self.metadata.published.isnull()
return self._make_copy(self.metadata[cond])
def before(self, date, include_null_dates=False):
cond = self.metadata.published < date
if include_null_dates:
cond = cond | self.metadata.published.isnull()
return self._make_copy(self.metadata[cond])
def get_papers(self, sub_catalog):
return self.query(f'full_text_file =="{sub_catalog}"')
def since_sars(self, include_null_dates=False):
return self.after(SARS_DATE, include_null_dates)
def before_sars(self, include_null_dates=False):
return self.before(SARS_DATE, include_null_dates)
def since_sarscov2(self, include_null_dates=False):
return self.after(SARS_COV_2_DATE, include_null_dates)
def before_sarscov2(self, include_null_dates=False):
return self.before(SARS_COV_2_DATE, include_null_dates)
def with_text(self):
return self.query('has_text')
def contains(self, search_str, column='abstract'):
cond = self.metadata[column].fillna('').str.contains(search_str)
return self._make_copy(self.metadata[cond])
def match(self, search_str, column='abstract'):
cond = self.metadata[column].fillna('').str.match(search_str)
return self._make_copy(self.metadata[cond])
def head(self, n):
return self._make_copy(self.metadata.head(n))
def | |
repo_path = os.path.dirname( os.path.dirname(os.path.realpath(__file__)) )
build_path = os.path.join( repo_path, "build" )
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=2,
command1=[driver_name,
"-driver_nranks", "0",
"-plugin_nranks", "2",
"-plugin_name", "engine_py",
"-mdi", "-role DRIVER -name driver -method LINK -plugin_path " + str(build_path),
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_path)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
expected = '''I am engine instance: 1
Engine name: MM
'''
assert driver_err == ""
assert driver_out == expected
assert driver_proc.returncode == 0
##########################
# MPI Method #
##########################
def test_cxx_cxx_mpi(valgrind, manager):
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_cxx*")[0]
engine_name = glob.glob("../build/engine_cxx*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_out == " Engine name: MM\n"
assert driver_err == ""
assert driver_proc.returncode == 0
def test_cxx_cxx_mpi21(valgrind, manager):
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_cxx*")[0]
engine_name = glob.glob("../build/engine_cxx*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=2,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == " Engine name: MM\n"
assert driver_proc.returncode == 0
def test_cxx_cxx_mpi12(valgrind, manager):
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_cxx*")[0]
engine_name = glob.glob("../build/engine_cxx*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=2,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == " Engine name: MM\n"
assert driver_proc.returncode == 0
def test_cxx_cxx_mpi_serial(valgrind, manager):
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_serial_cxx*")[0]
engine_name = glob.glob("../build/engine_cxx*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_out == " Engine name: MM\n"
assert driver_err == ""
assert driver_proc.returncode == 0
def test_cxx_f90_mpi(valgrind, manager):
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_cxx*")[0]
engine_name = glob.glob("../build/engine_f90*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == " Engine name: MM\n"
assert driver_proc.returncode == 0
def test_cxx_py_mpi(valgrind, manager):
# get the name of the driver code, which includes a .exe extension on Windows
driver_name = glob.glob("../build/driver_cxx*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[sys.executable, "engine_py.py",
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == " Engine name: MM\n"
assert driver_proc.returncode == 0
def test_f90_cxx_mpi(valgrind, manager):
global driver_out_expected_f90
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_f90*")[0]
engine_name = glob.glob("../build/engine_cxx*")[0]
# get the command to launch the driver
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == driver_out_expected_f90
assert driver_proc.returncode == 0
def test_f90_f90_mpi(valgrind, manager):
global driver_out_expected_f90
# get the names of the driver and engine codes, which include a .exe extension on Windows
driver_name = glob.glob("../build/driver_f90*")[0]
engine_name = glob.glob("../build/engine_f90*")[0]
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == driver_out_expected_f90
assert driver_proc.returncode == 0
def test_f90_py_mpi(valgrind, manager):
global driver_out_expected_f90
# get the name of the driver code, which includes a .exe extension on Windows
driver_name = glob.glob("../build/driver_f90*")[0]
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[driver_name,
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[sys.executable, "engine_py.py",
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == driver_out_expected_f90
assert driver_proc.returncode == 0
def test_py_cxx_mpi(valgrind, manager):
global driver_out_expected_py
# get the name of the engine code, which includes a .exe extension on Windows
engine_name = glob.glob("../build/engine_cxx*")[0]
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=1,
command1=[sys.executable, "driver_py.py",
"-mdi", "-role DRIVER -name driver -method MPI",
],
nproc2=1,
command2=[engine_name,
"-mdi", "-role ENGINE -name MM -method MPI",
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
assert driver_err == ""
assert driver_out == driver_out_expected_py
assert driver_proc.returncode == 0
def test_py_cxx_plug(valgrind, manager):
# get the directory of the plugins
repo_path = os.path.dirname( os.path.dirname(os.path.realpath(__file__)) )
build_path = os.path.join( repo_path, "build" )
# run the calculation
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
command1=[sys.executable, "driver_plug_py.py",
"-driver_nranks", "0",
"-plugin_nranks", "1",
"-plugin_name", "engine_cxx",
"-mdi", "-role DRIVER -name driver -method LINK -plugin_path " + str(build_path),
],
)
# run the calculation
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
expected = '''I am engine instance: 1
Engine name: MM
natoms: 10
'''
assert driver_err == ""
assert driver_out == expected
assert driver_proc.returncode == 0
def test_py_cxx_plug_mpi(valgrind, manager):
# get the directory of the plugins
repo_path = os.path.dirname( os.path.dirname(os.path.realpath(__file__)) )
build_path = os.path.join( repo_path, "build" )
# run the calculation
driver_command = get_command_line(
valgrind=valgrind,
manager=manager,
nproc1=2,
command1=[sys.executable, "driver_plug_py.py",
"-driver_nranks", "0",
"-plugin_nranks", "2",
"-plugin_name", "engine_cxx",
"-mdi", "-role DRIVER -name driver -method LINK -plugin_path " + str(build_path),
],
)
driver_proc = subprocess.Popen(driver_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=build_dir)
driver_tup = driver_proc.communicate()
# convert the driver's output into a string
driver_out = format_return(driver_tup[0])
driver_err = parse_stderr(driver_tup[1])
expected = '''I am engine instance: 1
Engine name: MM
natoms: 10
'''
assert driver_err == ""
assert driver_out == expected
assert driver_proc.returncode == 0
def test_py_f90_mpi(valgrind, manager):
global driver_out_expected_py
# get the name of the engine code, which includes a .exe | |
return term1 + term2
else: return 4*np.abs(E/_c.g0)
def fZ1(E):
if np.abs(E)<np.abs(_c.g0):
return 4*np.abs(E/_c.g0)
else:
term1 = (1+np.abs(E/_c.g0))**2
term2 = -((E/_c.g0)**2 - 1)**2 / 4
return term1 + term2
dos = np.empty_like(E)
for j, e in np.ndenumerate(E):
Z0 = fZ0(e)
Z1 = fZ1(e)
ellip = special.ellipk(np.sqrt(Z1/Z0))
dos[j] = (1/np.sqrt(Z0)) * ellip
DOS = prefactor * dos /_c.A
return DOS
else:
print('The model %s is not available' % (model))
def CarrierDensity(mu,T,model):
'''
Computes the carrier density directly from the band structure.
Parameters
----------
mu: array-like, J
Chemical potential :math:`\\mu`
T: scalar, K
Temperature :math:`T`
Returns
-------
array-like
Notes
-----
When ``T>0``, we use the standard formula of integrating the Fermi-Dirac distribution over the density of states :math:`\\rho` .
.. math::
n = \\int_{-\\infty}^{\\infty} \\frac{1}{e^{(\\epsilon-\\mu)/k_BT}+1}\\rho(\\epsilon) d\\epsilon
For graphene at ``T=0``, this reduces to
.. math::
n = \\frac{\\mu^2}{\\pi\\hbar^2 v_F^2}
'''
if T<0:
raise ValueError('Temperature T must be nonnegative')
if T==0 and model=='LowEnergy':
FermiLevel=mu # chemical potential at T=0 is called Fermi level
n = (FermiLevel / (sc.hbar*_c.vF))**2 / np.pi
return n
if T>0:
n = np.empty_like(mu)
for i, m in np.ndenumerate(mu):
p_electron = lambda e: DensityOfStates(e,model) * sd.FermiDirac(e-m,T)
p_hole = lambda e: DensityOfStates(e,model) * (1 - sd.FermiDirac(e-m,T))
n[i] = ( integrate.quad(p_electron,0,3*_c.g0,points=(_c.g0,m))[0] -
integrate.quad(p_hole,-3*_c.g0,0,points=(-_c.g0,-m))[0] )
return n
def ChemicalPotential(n,T=0,model='LowEnergy'):
'''Returns the chemical potential given the carrier density.
Essentially the inverse of Carrier Density.
Parameters
----------
n: array-like, m :sup:`-2`
Carrier density
T: scalar, K
Temperature
Returns
-------
array-like
Notes
-----
When ``T=0`` and ``model='LowEnergy'`` simultaneously, a closed form expression is used.
.. math::
E_F = \\hbar v_F\\sqrt{\\pi n}
For ``T>0``, we use a numerical routine regardless of model.
'''
if T==0 and model=='LowEnergy':
return sc.hbar*_c.vF*np.sqrt(sc.pi*n)
else:
## Numerically solve
# f is zero when n is correct
f = lambda mu: n - CarrierDensity(mu,T,model=model)
# Use T=0 value to estimate start
mu0 = ChemicalPotential(n,T=0,model=model)
# Offset x1 by 0.1 eV because if mu0=0, x0 and x1 would otherwise be equal
result = optimize.root_scalar(f,x0=mu0,x1=mu0*1.1+0.1*sc.elementary_charge,
rtol=1e-10).root
return result
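# Illustrative round trip (a sketch, not part of the public API): at T=0 with the 'LowEnergy'
# model both directions are closed-form, so ChemicalPotential and CarrierDensity should invert
# each other to floating-point precision. The density below is an assumed, typical gate-doped
# value of 1e17 m^-2 (1e13 cm^-2).
def _carrier_density_round_trip_sketch():
    n = 1e17                                            # m^-2, assumed example density
    eF = ChemicalPotential(n, T=0, model='LowEnergy')   # E_F = hbar * vF * sqrt(pi * n)
    n_back = CarrierDensity(eF, 0, 'LowEnergy')         # recovers n up to rounding error
    return eF / sc.elementary_charge, n_back            # Fermi level in eV, density in m^-2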
########################
# Electrical Transport #
########################
def Mobility(Te,mu0,T0):
'''
Temperature-dependent mobility.
See page 4 of Reference 1 and references 35, 37, 38 therein.
References
----------
[1] Shiue et al. 2019
URL: http://www.nature.com/articles/s41467-018-08047-3
[2] Dorgan et al. 2013.
URL: https://doi.org/10.1021/nl400197w
[3] Bae et al. 2010
URL: https://doi.org/10.1021/nl1011596
'''
mu = mu0*(Te/T0)**2.3
return mu
def ScatteringRate(mobility,FermiLevel):
'''
Estimated DC scattering rate from mobility.
Parameters
----------
mobility: scalar, mobility (m^2/V-s)
FermiLevel: scalar, Fermi level (J)
Returns
----------
rate: scalar, scattering rate (1/s)
'''
# Scattering time
tau = mobility*FermiLevel / (sc.elementary_charge*_c.vF**2)
rate = 1/tau
return rate
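# Hedged numerical sketch with assumed inputs (not values taken from the cited references): a
# mobility of 1 m^2/(V s), i.e. 10,000 cm^2/(V s), at E_F = 0.4 eV gives tau = mobility*E_F/(e*vF^2)
# of roughly 4e-13 s, hence a DC scattering rate of a few THz. Mobility() rescales the mobility
# with electron temperature according to the power law coded above.
def _scattering_rate_sketch():
    FermiLevel = 0.4 * sc.elementary_charge        # J, assumed Fermi level (~0.4 eV)
    mu0 = 1.0                                      # m^2/(V s), assumed reference mobility at T0
    rate = ScatteringRate(mu0, FermiLevel)         # ~2.5e12 1/s  ->  tau ~ 4e-13 s
    mu_scaled = Mobility(Te=600, mu0=mu0, T0=300)  # mobility rescaled by the (Te/T0)**2.3 power law
    return rate, 1.0 / rate, mu_scaled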
def Polarizibility(q,omega,gamma,FermiLevel,T=0):
'''
The Polarizibility :math:`\\chi^0` of graphene.
Parameters
----------
q: array-like, rad/m
Difference between scattered wavevector and incident
omega: array-like, rad/s
Angular frequency
gamma: scalar, rad/s
scattering rate due to mechanisms such as impurities (i.e. not Landau Damping)
We use the Mermin-corrected Relaxation time approximation (Eqn 4.9 of Ref 1)
FermiLevel: scalar, J
Fermi level of graphene.
T: scalar, K
Temperature
Notes
-----
The Polarizibility function of graphene. It can be derived from a
self-consistent field method or the Kubo formula.
.. math::
\\chi^0(\\mathbf q, \\omega) = \\frac{g}{A}\\sum_{nn'\\mathbf k}\\frac{f_{n\\mathbf k} - f_{n'\\mathbf{k+q}}}{\\epsilon_{n\\mathbf k}-\\epsilon_{n'\\mathbf{k+q}}+\\hbar(\\omega+i\\eta)}|\\left<n\\mathbf k|e^{-i\\mathbf{q\\cdot r}}|n'\\mathbf{k+q}\\right>|^2
For ``gamma == 0``, this returns equation 17 of Ref 2, which is the
polarizibility for general complex frequencies.
For ``gamma > 0``, we return the Mermin-corrected Relaxation time approximation
(Eqn 4.9 of Ref 1), which calls the gamma==0 case.
Examples
--------
Plot the real and imaginary part of :math:`\\chi^0`, normalized to density of states. Replicates Fig. 1 of Ref. [3].
.. plot::
>>> from graphenemodeling.graphene import monolayer as mlg
>>> import matplotlib.pyplot as plt
>>> from matplotlib import cm
>>> from scipy.constants import elementary_charge as eV
>>> from scipy.constants import hbar
>>> eF = 0.4*eV
>>> gamma = 0
>>> DOS = mlg.DensityOfStates(eF,model='LowEnergy')
>>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
>>> omega = np.linspace(0.01,2.5,num=250) / (hbar/eF)
>>> q = np.linspace(0.01,2.5,num=250) * kF
>>> pol = mlg.Polarizibility(q, omega[:,np.newaxis],gamma,eF)
>>> fig, (re_ax, im_ax) = plt.subplots(1,2,figsize=(11,4))
>>> re_img = re_ax.imshow( np.real(pol)/DOS,
... extent=(q[0]/kF,q[-1]/kF,hbar*omega[0]/eF,hbar*omega[-1]/eF),
... vmin=-2,vmax=1, cmap=cm.gray,
... origin = 'lower', aspect='auto'
... )
<...
>>> re_cb = fig.colorbar(re_img, ax = re_ax)
>>> im_img = im_ax.imshow( np.imag(pol)/DOS,
... extent=(q[0]/kF,q[-1]/kF,hbar*omega[0]/eF,hbar*omega[-1]/eF),
... vmin=-1,vmax=0, cmap=cm.gray,
... origin = 'lower', aspect='auto'
... )
<...
>>> im_cb = fig.colorbar(im_img, ax = im_ax)
>>> re_ax.set_ylabel('$\\hbar\\omega$/$\\epsilon_F$',fontsize=14)
Text...
>>> re_ax.set_xlabel('$q/k_F$',fontsize=14)
Text...
>>> im_ax.set_ylabel('$\\hbar\\omega$/$\\epsilon_F$',fontsize=14)
Text...
>>> im_ax.set_xlabel('$q/k_F$',fontsize=14)
Text...
>>> plt.show()
References
----------
[1] Christensen Thesis 2017
[2] <NAME>. Retarded interactions in graphene systems.
[3] <NAME>., <NAME>., <NAME>., and <NAME>. (2006).
Dynamical polarization of graphene at finite doping. New J. Phys. 8, 318–318.
https://doi.org/10.1088%2F1367-2630%2F8%2F12%2F318.
[4] <NAME>., and <NAME>. (2007).
Dielectric function, screening, and plasmons in two-dimensional graphene.
Phys. Rev. B 75, 205418.
https://link.aps.org/doi/10.1103/PhysRevB.75.205418.
'''
if gamma==0 and T==0:
'''
Equation 17 of Ref 2
'''
prefactor = -DensityOfStates(FermiLevel,model='LowEnergy')
kF = FermiWavenumber(FermiLevel, model='LowEnergy')
x = q / (2*kF)
zbar = sc.hbar*omega / (2*FermiLevel)
f = lambda x,zbar: (np.arcsin( (1-zbar)/x) + np.arcsin( (1+zbar)/x )
- ((zbar-1)/x)*np.sqrt(1 - ((zbar-1)/x)**2 )
+ ((zbar+1)/x)*np.sqrt(1 - ((zbar+1)/x)**2 ) )
dd = 1 + (x**2 / (4*np.sqrt(x**2 - (zbar+1e-9*1j)**2 ))) * (sc.pi - f(x,zbar+1e-9*1j))
return prefactor * dd
elif gamma !=0:
# Mermin-corrected Relaxation Time Approximation (Eqn 4.9 of Ref 1)
pol_complex_arg = Polarizibility(q,omega+1j*gamma,0,FermiLevel,T=0)
pol_0 = Polarizibility(q,0,0,FermiLevel,T=0)
numerator = (1 + 1j*gamma/omega) * pol_complex_arg
denominator = 1 + ( 1j*gamma/omega * pol_complex_arg / pol_0 )
return numerator / denominator
def dPolarizibility(q,omega,gamma,FermiLevel,T,dvar,diff=1e-7):
'''
Returns the derivative of the real part of the polarizibility at q, omega
with respect to the chosen variable, dvar.
Parameters
----------
q: array-like, rad/m
Difference between scattered wavevector and incident
omega: array-like, rad/s
Angular frequency
gamma: scalar, rad/s
The scattering rate
FermiLevel: scalar, J
the Fermi level
T: scalar, K
Temperature
dvar: 'omega': Take the partial wrt omega
'q': Take the partial wrt q
diff: Size of the finite difference to use when computing the derivative.
Method uses central difference.
'''
if dvar == 'omega':
P = lambda w: np.real(Polarizibility(q,w,gamma,FermiLevel,T))
wa, wb = omega*(1-diff), omega*(1+diff)
return (P(wb)-P(wa))/(2*omega*diff)
elif dvar == 'q':
P = lambda qv: np.real(Polarizibility(qv,omega,gamma,FermiLevel,T))
qa,qb = q*(1-diff), q*(1+diff)
return (P(qb)-P(qa))/(2*q*diff)
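# Hedged usage sketch: evaluating the central-difference derivative above at a single point.
# The inputs are assumed example values (E_F = 0.4 eV, q = 0.5 k_F, hbar*omega = 0.5 E_F);
# gamma = 0 and T = 0 keep Polarizibility on its closed-form lossless branch.
def _dpolarizibility_sketch():
    eF = 0.4 * sc.elementary_charge
    kF = FermiWavenumber(eF, model='LowEnergy')
    q = 0.5 * kF
    w = 0.5 * eF / sc.hbar
    return dPolarizibility(q, w, gamma=0, FermiLevel=eF, T=0, dvar='omega')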
######################
# Optical Properties #
######################
def ScalarOpticalConductivity(q,omega,gamma,FermiLevel,T,model=None):
'''The diagonal conductivity of graphene :math:`\\sigma_{xx}`.
Parameters
----------
q: array-like, rad/m
Wavenumber
omega: array-like, rad/s
Angular frequency
FermiLevel: scalar, J
the Fermi energy
gamma: scalar, rad/s
scattering rate
T: scalar, K
Temperature
model: string
Typically 'None', but for a specific model, specify it.
Returns
-------
array-like
conductivity at every value of omega
Examples
--------
Plot the optical conductivity normalized to intrinsic conductivity :math:`\\sigma_0`.
Replicates Fig. 4.4 in Ref. [1].
.. plot::
>>> from graphenemodeling.graphene import monolayer as mlg
>>> from scipy.constants import elementary_charge, hbar
>>> from graphenemodeling.graphene._constants import sigma_0
>>> import matplotlib.pyplot as plt
>>> eF = 0.4 * elementary_charge
>>> g = 0.012 * elementary_charge / hbar
>>> w = np.linspace(0.01,3,num=150) / (hbar/eF)
>>> s_0K_0g = mlg.ScalarOpticalConductivity(q=0,omega=w,gamma=0,FermiLevel=eF,T=0)
>>> s_0K_12g = mlg.ScalarOpticalConductivity(q=0,omega=w,gamma=g,FermiLevel=eF,T=0.01)
>>> s_300K_12g = mlg.ScalarOpticalConductivity(q=0,omega=w,gamma=g,FermiLevel=eF,T=300)
>>> fig, (re_ax, im_ax) = plt.subplots(1,2,figsize=(11,4))
>>> s_Re = np.real(s_0K_0g)
>>> s_Im = np.imag(s_0K_0g)
>>> re_ax.plot(w*hbar/eF,s_Re/sigma_0,'r',label='T=0, $\\hbar\\gamma$=0 meV')
<...
>>> im_ax.plot(w*hbar/eF,s_Im/sigma_0,'r',label='T=0, $\\hbar\\gamma$=0 meV')
<...
>>> s_Re = np.real(s_0K_12g)
>>> s_Im = np.imag(s_0K_12g)
>>> re_ax.plot(w*hbar/eF,s_Re/sigma_0,color='royalblue',label='T=0, $\\hbar\\gamma$=12 meV')
<...
>>> im_ax.plot(w*hbar/eF,s_Im/sigma_0,color='royalblue',label='T=0, $\\hbar\\gamma$=12 meV')
<...
>>> s_Re = np.real(s_300K_12g)
>>> s_Im = np.imag(s_300K_12g)
>>> re_ax.plot(w*hbar/eF,s_Re/sigma_0,color='green',label='T=300 K, $\\hbar\\gamma$=12 meV')
<...
>>> im_ax.plot(w*hbar/eF,s_Im/sigma_0,color='green',label='T=300 K, $\\hbar\\gamma$=12 meV')
<...
>>> re_ax.set_ylabel('Re[$\\sigma$]/$\\sigma_0$')
>>> re_ax.set_xlabel('$\\hbar\\omega/E_F$')
>>> re_ax.plot(w*hbar/eF,np.ones_like(w),'--',color='gray')
>>> re_ax.set_ylim(0,2)
>>> im_ax.set_ylabel('Im[$\\sigma$]/$\\sigma_0$')
>>> im_ax.set_xlabel('$\\hbar\\omega/E_F$')
>>> im_ax.plot(w*hbar/eF,np.zeros_like(w),'--',color='gray')
>>> im_ax.set_ylim(-2,3)
>>> plt.legend()
>>> plt.show()
Notes
-----
The *matrix* optical conductivity :math:`\\overleftrightarrow{\\sigma}(q,\\omega)`
relates the surface current :math:`\\mathbf K(\\omega)`
to an applied electric field :math:`\\mathbf E(\\omega)`.
The fully general, anisotropic, nonlocal expression is given by
.. math::
\\mathbf K(\\omega)=\\int \\overleftrightarrow\\sigma(q,\\omega)\\mathbf E(\\omega) dq
Here, :math:`\\omega` refers to the frequency and :math:`q` refers to the scattering wavevector.
The above expression fully general incorporating anisotropies and nonlocality and is rarely needed.
In most cases, :math:`\\overleftrightarrow{\\sigma}` is isotropic,
so the above equation can be reduced to a *scalar* equation
.. math::
K(\\omega)=\\int \\sigma(q,\\omega)E(\\omega)dq
This is the conductivity in this function. | |
= [True, [40]])
bound = Attack("Bound", 2, "A damage attack; with a Yellow Spike override, the target also loses 15 Strength", damage = [True, [30]], combo = [True, {"STR":15}], consumes = True)
snapshot = Attack("Snapshot", 3, "A damage attack that also swap-blocks the opponent for this and the next 2 rounds", damage = [True, [40]], hack = [True, {"SWAP":3}])
crystal_maul = Attack("Crystal Maul", 3, "A damage attack that also causes the target Nanovor to lose 10 Strength", damage = [True, [40]], hack = [True, {"STR":10}])
turbo_kick = Attack("Turbo Kick", 2, "A damage attack", damage = [True, [40]])
speed_trade = Attack("Speed Trade", 2, "An attack that causes the target Nanovor to lose 20 Speed, and your current Nanovor gains 20 Speed", hack = [True, {"INCSELFSPD":20, "SPD":20}])
fightspeed = Attack("Fightspeed", 3, "A damage attack that ignores Armor; with a Yellow Spike this Nanovor places a +40 Speed Override", damage = [True, [35]], combo = [True, {"SETNEW":{"SPD":40}}], armorpiercing = True)
slow_down = Attack("Slow Down", 1, "An energy attack that causes the target Nanovor to lose 15 Speed", hack = [True, {"SPD":15}])
shock_sting = Attack("Shock Sting", 2, "An electric damage attack", damage = [True, [40]])
poison_sting = Attack("Poison Sting", 2, "A poison damage attack that ignores armor", damage = [True, [30]], armorpiercing = True)
poison_mist = Attack("Poison Mist", 2, "A poison damage attack; with a Yellow Spike override, the target Nanovor also loses 10 Armor", damage = [True, [40]], combo = [True, {"ARM":10}], consumes = True)
shock_and_awe = Attack("Shock and Awe", 2, "An attack that causes the target Nanovor to lose 30 Speed", hack = [True, {"SPD":30}])
scarab_slash = Attack("Scarab Slash", 3, "A fire damage attack", damage = [True, [50]])
tonguestrike = Attack("Tonguestrike", 2, "A damage attack; with a Red Spike override, the opponent is also swap-blocked for this and the next 3 rounds.", damage = [True, [40]], combo = [True, {"SWAP":4}], consumes = True)
psi_cannon = Attack("Psi Cannon", 3, "An attack that causes the target Nanovor to lose 15 Strength; with a Yellow Spike override, it also loses 10 Armor", hack = [True, {"STR":15}], combo = [True, {"ARM":10}], consumes = True)
claw_smash = Attack("Claw Smash", 1, "A damage attack; with a Yellow Spike override, you also gain a +10 Strength override", damage = [True, [30]], combo = [True, {"SETNEW":{"STR":10}}])
whiplash = Attack("Whiplash", 2, "A damage attack that ignores Armor; with a Blue Spike override, the opponent also loses 2 Energy", damage = [True, [30]], combo = [True, {"ENSAP":2}], consumes = True, armorpiercing=True)
crystal_flash = Attack("Crystal Flash", 3, "A ranged attack that also causes the target Nanovor to lose 5 Armor", damage = [True, [40]], hack = [True, {"ARM":5}])
fire_splash = Attack("Fire Splash", 3, "A fire attack", damage = [True, [60]])
triton_chomp = Attack("Triton Chomp", 2, "A damage attack that ignores Armor", damage = [True, [30]], armorpiercing=True)
spitball = Attack("Spitball", 1, "The target Nanovor loses 20 Speed", hack = [True, {"SPD":20}])
sting = Attack("Sting", 2, "A damage attack", damage = [True, [40]])
spark_siphon = Attack("Spark Siphon", 2, "A damage attack; with a Blue Spike override, the target loses 2 Energy", damage = [True, [40]], combo = [True, {"ENSAP":2}], consumes = True)
paralyze = Attack("Paralyze", 3, "An attack that swap-blocks the opponent for this and the next 2 rounds, and causes the target to lose 40 Speed", hack = [True, {"SWAP":3, "SPD":40}])
shockback = Attack("Shockback", 2, "An attack that causes the opponent to lose 3 Energy, and stuns your current Nanovor for this round and next", hack = [True, {"ENSAP":3, "SELFSTUN":1}])
triton_strike = Attack("Triton Strike", 4, "An electric damage attack", damage = [True, [60]])
fang_blast = Attack("Fang Blast", 2, "An energy damage attack that ignores armor", damage = [True, [30]], armorpiercing = True)
snare = Attack("Snare", 3, "An attack that swap-blocks the opponent for this and the next 4 rounds.", hack = [True, {"SWAP":5}])
spiderbite = Attack("Spiderbite", 3, "An electric damage attack; with a Blue Spike override, the opponent also loses 2 Energy", damage = [True, [50]], combo = [True, {"ENSAP":2}], consumes = True)
triton_tumble = Attack("Triton Tumble", 3, "The target Nanovor loses 30 Strength, and your current Nanovor loses 30 Speed", hack = [True, {"STR":30, "DECSELFSPD":30}])
shard_slice = Attack("Shard Slice", 2, "A damage attack that ignores armor", damage = [True, [30]], armorpiercing=True)
jumpstart = Attack("Jumpstart", 2, "This Nanovor places a +25 SPD Override", override = [True, {"SPD":25}])
slider = Attack("Slider", 3, "A damage attack", damage = [True, [50]])
burn = Attack("Burn", 2, "A damage attack that ignores armor", damage = [True, [30]], armorpiercing=True)
weaken = Attack("Weaken", 1, "An attack that causes the target Nanovor to lose 5 Strength", hack = [True, {"STR":5}])
blue_thunder = Attack("Blue Thunder", 3, "A damage attack; with a Blue Spike override, this damage ignores armor", damage = [True, [50]], combo = [True, {"PIERCE":"ALL"}], consumes = True)
slide_by = Attack("Slide-By", 3, "A damage attack that also causes the opponent to lose 1 Energy", damage = [True, [40]], hack = [True, {"ENSAP":1}])
shutdown = Attack("Shutdown", 5, "A ranged attack that causes the target Nanovor to lose 10 Armor and 10 Strength.", hack = [True, {"ARM":10, "STR":10}])
shard_smash = Attack("Shard Smash", 3, "A damage attack that also deletes the opponent's override", damage = [True, [30]], hack = [True, {"OBLIT":0}])
pummel = Attack("Pummel", 2, "A damage attack that ignores armor; with a Blue Spike override, the attack does heavier damage that also ignores armor", damage = [True, [30]], combo = [True, {"DMGSET":50, "PIERCE":"ALL"}], consumes = True, armorpiercing=True)
shard_blast = Attack("Shard Blast", 3, "A ranged damage attack that also causes the target to lose 10 Strength", damage = [True, [30]], hack = [True, {"STR":10}])
fire_smash = Attack("Fire Smash", 2, "A fire damage attack", damage = [True, [40]])
shard_shatter = Attack("Shard Shatter", 4, "A damage attack that ignores armor; with a Blue Spike override, the attack does more damage that also ignores armor", damage = [True, [40]], combo = [True, {"DMGDOUBLE":2, "PIERCE":"ALL"}], consumes = True, armorpiercing=True)
crunchv2 = Attack("Crunch", 2, "A damage attack that ignores armor", damage = [True, [30]], armorpiercing=True)
random_jack = Attack("Random Jack", 3, "An energy attack that either does moderate damage or heavy damage that ignores armor", damage = [True, [30]], special_condition=[True, {"CHANCE-DMG-50": {"XTRAPIERCE":50}}])
acid_burn = Attack("Acid Burn", 3, "An acid damage attack that ignores armor", damage = [True, [35]], armorpiercing=True)
crashoverride = Attack("CrashOverride", 2, "An attack that removes the target Swarms Override; with a Blue Spike override, it also swap-blocks the opponent for this and the next 3 rounds", hack = [True, {"OBLIT":0}], combo = [True, {"SWAP":4}], consumes = True)
phreak = Attack("Phreak", 3, "An energy damage attack that also causes the opponent to lose 5 armor", damage = [True, [20]], hack = [True, {"ARM":5}])
rip = Attack("Rip", 2, "A damage attack that ignores armor", damage = [True, [30]], armorpiercing=True)
whopper = Attack("Whopper", 3, "A ranged attack that drains 5 Energy from the target swarm, and your Nanovor is stunned for two rounds", hack = [True, {"ENSAP":5, "SELFSTUN":2}])
thrash = Attack("Thrash", 2, "A damage attack; with a Blue Spike override, your Nanovor also gains 10 Strength", damage = [True, [40]], combo = [True, {"INCSELFSTR":10}], consumes = True)
cyber_shock = Attack("Cyber Shock", 3, "A ranged electric damage attack that ignores armor", damage = [True, [35]], armorpiercing=True)
depth_charge = Attack("Depth Charge", 5, "A damage attack", damage = [True, [70]])
battle_rush = Attack("Battle Rush", 1, "A damage attack", damage = [True, [30]])
war_dance = Attack("War Dance", 4, "A damage attack that ignores Armor and also causes the target Nanovor to lose 2 Energy", damage = [True, [35]], hack = [True, {"ENSAP":2}], armorpiercing=True)
pounce = Attack("Pounce", 1, "A damage attack; with a Blue Spike override, the attack does more damage that ignores armor", damage = [True, [30]], combo = [True, {"DMGSET":45, "PIERCE":"ALL"}], consumes = True)
chop_drop = Attack("Chop Drop", 3, "An energy attack that also causes the target Nanovor to lose 10 Speed", damage = [True, [50]], hack = [True, {"SPD":10}])
battle_dance = Attack("Battle Dance", 4, "An attack that causes the target swarm to lose 1 Energy and your Nanovor gains 20 Speed", hack = [True, {"ENSAP":1, "INCSELFSPD":20}])
psi_drain = Attack("Psi Drain", 4, "An energy attack that causes the opponent to lose 25 Speed | |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" opt_test """
from mindspore.ops import Primitive, PrimitiveWithInfer
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G
from mindspore import Tensor
import numpy as np
# pylint: disable=unused-variable
# opt test data, not for running
# pylint: disable=unused-argument
# pylint: disable=redefined-outer-name
scalar_add = Primitive('scalar_add')
scalar_mul = Primitive('scalar_mul')
tuple_getitem = Primitive('tuple_getitem')
switch = Primitive('switch')
def test_sexp_conversion():
""" test_sexp_conversion """
return scalar_mul(10, scalar_add(5, 4))
class FnDict:
def __init__(self):
self.fnDict = {}
def __call__(self, fn):
self.fnDict[fn.__name__] = fn
def __getitem__(self, name):
return self.fnDict[name]
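# Illustrative sketch (not part of the original test suite): FnDict acts as a
# decorator-based registry, which is how every test factory below returns its
# "before"/"after" graphs by tag. The function name here is hypothetical.
def example_fndict_lookup():
    """ example_fndict_lookup """
    fns = FnDict()
    @fns
    def before(x):
        return scalar_add(x, 0)
    @fns
    def after(x):
        return x
    # Registered functions are retrieved by name, exactly as fns[tag] is used below.
    return fns['before'], fns['after']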
def test_add_zero(tag):
""" test_add_zero """
fns = FnDict()
@fns
def before_1(x):
return scalar_add(x, 0)
@fns
def before_2(x):
return scalar_add(scalar_add(x, 0), 0)
@fns
def after(x):
return x
return fns[tag]
def test_elimR(tag):
""" test_elimR """
R = Primitive('R')
fns = FnDict()
@fns
def before_1(x):
return R(x)
@fns
def after(x):
return x
return fns[tag]
def test_idempotent(tag):
""" test_idempotent """
P = Primitive('P')
R = Primitive('R')
fns = FnDict()
@fns
def before_1(x):
return P(P(x))
@fns
def before_2(x):
return P(P(P(P(P(x)))))
@fns
def after(x):
return P(x)
return fns[tag]
def test_constant_variable(tag):
""" test_constant_variable """
P = Primitive('P')
Q = Primitive('Q')
fns = FnDict()
@fns
def before_1(x):
return Q(15) + Q(x)
@fns
def after(x):
return P(15) + Q(x)
return fns[tag]
def cost(x):
""" cost """
return x * 10
J = Primitive('J')
def test_expendJ(x):
""" test_expendJ """
return J(cost)(x)
def test_elim_jinv_j(tag):
""" test_elim_jinv_j """
J = Primitive('J')
Jinv = Primitive('Jinv')
fns = FnDict()
@fns
def before_1(x):
return J(Jinv(x))
@fns
def before_2(x):
return Jinv(J(x))
@fns
def after(x):
return x
return fns[tag]
def test_simplify_always_true_false(tag):
""" test_simplify_always_true_false """
fns = FnDict()
@fns
def before_1(x, y):
return switch(True, x, y)
@fns
def before_2(x, y):
return switch(False, y, x)
@fns
def after(x, y):
return x
return fns[tag]
def test_inline(tag):
""" test_inline """
fns = FnDict()
@fns
def before(x, y):
def fn1(x1):
return x1
return fn1(x)
@fns
def after(x, y):
return x
return fns[tag]
def test_inline_successively(tag):
""" test_inline_successively """
fns = FnDict()
def one(x):
return x + 1
def two(x):
return one(x + 2)
def three(x):
return two(x + 3)
@fns
def before(x):
return three(x)
@fns
def after(x):
return x + 3 + 2 + 1
return fns[tag]
def test_inline_closure(tag):
""" test_inline_closure """
fns = FnDict()
@fns
def before(x, y, z):
c = z * z
def f(x):
return x + c
return f(x * y)
@fns
def after(x, y, z):
c = z * z
return x * y + c
return fns[tag]
def test_inline_deep_closure(tag):
""" test_inline_deep_closure """
fns = FnDict()
def f(x):
w = x * x
def g():
def h():
return w
return h()
return g
@fns
def before(x, y):
return f(x)() - f(y)()
@fns
def after(x, y):
w1 = x * x
w2 = y * y
return w1 - w2
return fns[tag]
def test_inline_new_closure(tag):
""" test_inline_new_closure """
fns = FnDict()
def q(x):
return x * x
def f(x):
def g():
return q(x)
return g
@fns
def before(x):
return f(x)
@fns
def after(x):
def g():
return x * x
return g
return fns[tag]
def test_inline_recursive_direct(tag):
""" test_inline_recursive_direct """
fns = FnDict()
@fns
def before1(x):
return before1(x - 1)
@fns
def before2(x):
def helper1(x):
return before2(x - 1)
def helper2(x):
return before1(x - 1)
return helper1(x)
return fns[tag]
def test_inline_recursive(tag):
""" test_inline_recursive """
fns = FnDict()
@fns
def before(x):
if x <= 0:
return x
return before(x - 1)
return fns[tag]
def test_inline_while(tag):
""" test_inline_while """
fns = FnDict()
@fns
def before(x):
rval = x
while rval < 100:
rval = rval * rval
return rval
return fns[tag]
def test_cse(tag):
""" test_cse """
fns = FnDict()
scalar_div = Primitive('scalar_div')
@fns
def test_f1(x, y):
a = scalar_add(x, y)
b = scalar_add(x, y)
c = scalar_mul(a, b)
return c
@fns
def test_f2(x, y):
a = scalar_add(x, y)
b = scalar_add(scalar_mul(a, y), scalar_div(a, x))
c = scalar_add(scalar_mul(a, y), scalar_div(scalar_add(x, y), x))
d = scalar_add(b, c)
return d
return fns[tag]
def test_arithmetic(tag):
""" test_arithmetic """
fns = FnDict()
identity = Primitive('identity')
@fns
def multiply_by_zero_l(x):
return scalar_mul(x, 0)
@fns
def multiply_by_zero_r(x):
return scalar_mul(0, x)
@fns
def after_0(x):
return 0
@fns
def multiply_by_one_l(x):
return scalar_mul(x, 1)
@fns
def multiply_by_one_r(x):
return scalar_mul(1, x)
@fns
def add_zero_l(x):
return scalar_add(x, 0)
@fns
def add_zero_r(x):
return scalar_add(0, x)
@fns
def elim_identity(x):
return identity(x)
@fns
def after(x):
return x
return fns[tag]
def test_elim_cast_same_dtype(tag):
""" test_elim_cast_same_dtype """
fns = FnDict()
cast = P.Cast()
@fns
def fp32_cast_fp32(x, y):
return cast(x, y)
@fns
def after(x, y):
return x
return fns[tag]
def elim_reshape_same_shape(tag):
""" elim_reshape_same_shape """
fns = FnDict()
reshape = P.Reshape()
shape = (2, 3)
@fns
def reshape_to_2_3(x):
return reshape(x, shape)
@fns
def after(x):
return x
return fns[tag]
def elim_two_reshape(tag):
""" elim_two_reshape """
fns = FnDict()
reshape = P.Reshape()
shape = (2, 3)
shape_2 = (3, 2)
@fns
def before(x):
return reshape(reshape(x, shape_2), shape)
@fns
def after(x):
return reshape(x, shape)
return fns[tag]
def elim_two_cast(tag):
""" elim_two_cast """
fns = FnDict()
cast = P.Cast()
@fns
def before(x, a, b):
return cast(cast(x, a), b)
@fns
def after(x, a, b):
return cast(x, b)
return fns[tag]
def test_elim_transpose(tag):
""" test_elim_transpose """
fns = FnDict()
transpose = P.Transpose()
perm = (0, 1, 2)
@fns
def before(x):
return transpose(x, perm)
@fns
def after(x):
return x
return fns[tag]
def test_elim_tile_multiply_one(tag):
""" test_elim_tile_multiply_one """
fns = FnDict()
tile = P.Tile()
all_one = (1, 1, 1)
@fns
def before(x):
return tile(x, all_one)
@fns
def after(x):
return x
return fns[tag]
def test_elim_reduce_mean_shape_one(tag):
""" test_elim_reduce_mean_shape_one """
fns = FnDict()
reduce_mean = P.ReduceMean()
@fns
def before(x, y):
return reduce_mean(x, 0)
@fns
def after(x, y):
return x
return fns[tag]
def test_elim_all_shape_one(tag):
""" test_elim_all_shape_one """
fns = FnDict()
all_ = P.ReduceAll()
@fns
def before(x, y):
return all_(x, 0)
@fns
def after(x, y):
return x
return fns[tag]
def test_elim_sum_shape_one(tag):
""" test_elim_sum_shape_one """
fns = FnDict()
sum_ = P.ReduceSum()
@fns
def before(x, y):
return sum_(x, 0)
@fns
def after(x, y):
return x
return fns[tag]
def test_tuple_getitem(tag):
""" test_tuple_getitem """
fns = FnDict()
make_tuple = Primitive('make_tuple')
@fns
def make_get_0(x, y):
return tuple_getitem(make_tuple(x, y), 0)
@fns
def make_get_1(x, y):
return tuple_getitem(make_tuple(x, y), 1)
@fns
def after_0(x, y):
return x
@fns
def after_1(x, y):
return y
return fns[tag]
def test_tuple_setitem(tag):
""" test_tuple_setitem """
fns = FnDict()
make_tuple = Primitive('make_tuple')
tuple_setitem = Primitive('tuple_setitem')
@fns
def before_0(x, y, z):
return tuple_setitem(make_tuple(x, y), 0, z)
@fns
def before_1(x, y, z):
return tuple_setitem(make_tuple(x, y), 1, z)
@fns
def after_0(x, y, z):
return make_tuple(z, y)
@fns
def after_1(x, y, z):
return make_tuple(x, z)
return fns[tag]
def test_tuple_get_set_item(tag):
""" test_tuple_get_set_item """
fns = FnDict()
tuple_setitem = Primitive('tuple_setitem')
@fns
def before_0(t, x):
return tuple_getitem(tuple_setitem(t, 0, x), 0)
@fns
def after_0(t, x):
return x
@fns
def before_1(t, x):
return tuple_getitem(tuple_setitem(t, 0, x), 1)
@fns
def after_1(t, x):
return tuple_getitem(t, 1)
return fns[tag]
def test_partial(tag):
""" test_partial """
fns = FnDict()
partial = Primitive('partial')
def f(x, y):
return scalar_add(x, y)
@fns
def before(x, y):
return partial(f, x)(y)
@fns
def after(x, y):
return f(x, y)
return fns[tag]
def test_replace_applicator(tag):
""" test_replace_applicator """
fns = FnDict()
partial = Primitive('partial')
def app1(x, y):
return scalar_add(x, y)
def app2(x, y):
return app1(x, y)
def app3(x, y):
return scalar_add(y, x)
@fns
def before1(x, y):
return app1(x, y)
@fns
def before2(x, y):
return app2(x, y)
@fns
def before3(x, y):
return app3(x, y)
@fns
def after(x, y):
return scalar_add(x, y)
return fns[tag]
def test_specialize_on_graph_arguments(tag):
""" test_specialize_on_graph_arguments """
fns = FnDict()
f1 = Primitive('f1')
f2 = Primitive('f2')
@fns
def before(x, y):
def helper(f, x, g, y):
return scalar_add(f(x), g(y))
return helper(f1, x, f2, y)
@fns
def after(x, y):
def helper(x, y):
return scalar_add(f1(x), f2(y))
return helper(x, y)
return fns[tag]
def test_incorporate_getitem(tag):
""" test_incorporate_getitem """
fns = FnDict()
f1 = Primitive('f1')
f2 = Primitive('f2')
@fns
def before1(x, y):
def fn(x, y):
return f1(x, y), | |
= int(np.abs(np.log10(precision)))
qdesign_precision = self.design.template_options.PRECISION
all_idx_bad_fillet['reduced_idx'] = get_range_of_vertex_to_not_fillet(
coords, a_fillet, qdesign_precision, add_endpoints=True)
midpoints = list()
midpoints = [
QGDSRenderer.midpoint_xy(coords[idx - 1][0], coords[idx - 1][1],
vertex2[0], vertex2[1])
for idx, vertex2 in enumerate(coords)
if idx > 0
]
all_idx_bad_fillet['midpoints'] = midpoints
# Move data around to be useful for GDS
def gather_subtract_elements_and_bounds(self, chip_name: str,
table_name: str,
table: geopandas.GeoDataFrame,
all_subtracts: list,
all_no_subtracts: list):
"""For every chip, and layer, separate the "subtract" and "no_subtract" elements
and gather bounds for all the elements in qgeometries.
Use format: f'{chip_name}_{table_name}s'.
Args:
chip_name (str): Name of chip. Example is 'main'.
table_name (str): There are multiple tables in QGeometry table. Example: 'path' and 'poly'.
table (geopandas.GeoDataFrame): Actual table for the name.
all_subtracts (list): Pass by reference so method can update this list.
all_no_subtracts (list): Pass by reference so method can update this list.
"""
# Determine bounding box and return scalar larger than size.
bounds = tuple(self.get_bounds(table))
# Add the bounds of each table to list.
self.dict_bounds[chip_name]['gather'].append(bounds)
if is_true(self.options.ground_plane):
self.separate_subtract_shapes(chip_name, table_name, table)
all_subtracts.append(
getattr(self, f'{chip_name}_{table_name}_subtract_true'))
all_no_subtracts.append(
getattr(self, f'{chip_name}_{table_name}_subtract_false'))
# Done because ground plane option may be false.
# This is not used anywhere currently.
# Keep this deprecated code.
# polys use gdspy.Polygon; paths use gdspy.LineString
'''
q_geometries = table.apply(self.qgeometry_to_gds, axis=1)
setattr(self, f'{chip_name}_{table_name}s', q_geometries)
'''
def get_table(self, table_name: str, unique_qcomponents: list,
chip_name: str) -> geopandas.GeoDataFrame:
"""If unique_qcomponents list is empty, get table using table_name from QGeometry tables
for all elements with table_name. Otherwise, return a table with fewer elements, for just the
qcomponents within the unique_qcomponents list.
Args:
table_name (str): Can be "path", "poly", etc. from the QGeometry tables.
unique_qcomponents (list): User requested list of component names to export to GDS file.
chip_name (str): Name of chip to use.
Returns:
geopandas.GeoDataFrame: Table of elements within the QGeometry.
"""
# self.design.qgeometry.tables is a dict. key=table_name, value=geopandas.GeoDataFrame
if len(unique_qcomponents) == 0:
table = self.design.qgeometry.tables[table_name]
else:
table = self.design.qgeometry.tables[table_name]
# Convert string QComponent.name to QComponent.id
highlight_id = [
self.design.name_to_id[a_qcomponent]
for a_qcomponent in unique_qcomponents
]
# Remove QComponents which are not requested.
table = table[table['component'].isin(highlight_id)]
table = table[table['chip'] == chip_name]
return table
# To export the data.
def new_gds_library(self) -> gdspy.GdsLibrary:
"""Creates a new GDS Library. Deletes the old.
Create a new GDS library file. It can contain multiple cells.
Returns:
gdspy.GdsLibrary: GDS library which can contain multiple cells.
"""
self.update_units()
if self.lib:
self._clear_library()
# Create a new GDS library file. It can contain multiple cells.
self.lib = gdspy.GdsLibrary(
unit=float(self.parse_value(self.options.gds_unit)))
return self.lib
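# Illustrative note (an assumption about typical gdspy usage, not taken from this
# module): gdspy.GdsLibrary(unit=1e-6) makes one user unit correspond to 1 um;
# here the unit instead comes from the parsed gds_unit option.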
def check_cheese(self, chip: str, layer: int) -> int:
"""Examine the option for cheese_view_in_file.
Args:
chip (str): User defined chip name.
layer (int): Layer used in chip.
Returns:
int: Observation of the option based on chip and layer information.
* 0 This is the initialization state.
* 1 The layer is in the chip and cheese is True.
* 2 The layer is in the chip and cheese is False.
* 3 The chip is not in dict, so can't give answer.
* 4 The layer is not in the chip, so can't give answer.
"""
code = 0
cheese_option = self.parse_value(self.options.cheese.view_in_file)
if chip in cheese_option:
if layer in cheese_option[chip]:
if is_true(cheese_option[chip][layer]):
code = 1
else:
code = 2
else:
code = 4
else:
code = 3
return code
def check_no_cheese(self, chip: str, layer: int) -> int:
"""Examine the option for no_cheese_view_in_file.
Args:
chip (str): User defined chip name.
layer (int): Layer used in chip.
Returns:
int: Observation of the option based on chip and layer information.
* 0 This is the initialization state.
* 1 The layer is in the chip and viewing no-cheese is True.
* 2 The layer is in the chip and viewing no-cheese is False.
* 3 The chip is not in dict, so can't give answer.
* 4 The layer is not in the chip, so can't give answer.
"""
code = 0
no_cheese_option = self.parse_value(self.options.no_cheese.view_in_file)
if chip in no_cheese_option:
if layer in no_cheese_option[chip]:
if is_true(no_cheese_option[chip][layer]):
code = 1
else:
code = 2
else:
code = 4
else:
code = 3
return code
def check_either_cheese(self, chip: str, layer: int) -> int:
"""Use methods to check two options and give review of values
for no_cheese_view_in_file and cheese_view_in_file.
Args:
chip (str): User defined chip name.
layer (int): Layer used in chip.
Returns:
int: Observation of the options based on chip and layer information.
* 0 This is the initialization state.
* 1 Show the layer in both cheese and no cheese
* 2 Show the layer in just the cheese
* 3 Show the no-cheese, but not the cheese
* 4 Do NOT show the layer in either the cheese or the no-cheese
* 5 The chip is not in the default option.
* 6 The layer is not in the chip dict.
"""
code = 0
no_cheese_code = self.check_no_cheese(chip, layer)
cheese_code = self.check_cheese(chip, layer)
if no_cheese_code == 0 or cheese_code == 0:
self.logger.warning(
f'Not able to get no_cheese_view_in_file or cheese_view_in_file from self.options.'
)
code = 0
return code
if no_cheese_code == 1 and cheese_code == 1:
code = 1
return code
if no_cheese_code == 2 and cheese_code == 1:
code = 2
return code
if no_cheese_code == 1 and cheese_code == 2:
code = 3
return code
if no_cheese_code == 2 and cheese_code == 2:
code = 4
return code
if no_cheese_code == 3 or cheese_code == 3:
code = 5
self.logger.warning(
f'Chip={chip} is not in either no_cheese_view_in_file or cheese_view_in_file from self.options.'
)
return code
if no_cheese_code == 4 or cheese_code == 4:
code = 6
self.logger.warning(
f'layer={layer} is not in chip={chip} in either no_cheese_view_in_file or cheese_view_in_file from self.options.'
)
return code
return code
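# Illustrative summary of the combination logic above (derived from the code,
# added here only as a readability aid):
#   no_cheese=1, cheese=1 -> 1 (show both)
#   no_cheese=2, cheese=1 -> 2 (show cheese only)
#   no_cheese=1, cheese=2 -> 3 (show no-cheese only)
#   no_cheese=2, cheese=2 -> 4 (show neither)
#   either code 3 -> 5 (chip missing from options); either code 4 -> 6 (layer missing)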
def populate_cheese(self):
""" Iterate through each chip, then layer to determine the cheesing geometry.
"""
lib = self.lib
cheese_sub_layer = int(self.parse_value(self.options.cheese.datatype))
nocheese_sub_layer = int(
self.parse_value(self.options.no_cheese.datatype))
for chip_name in self.chip_info:
layers_in_chip = self.design.qgeometry.get_all_unique_layers(
chip_name)
for chip_layer in layers_in_chip:
code = self.check_cheese(chip_name, chip_layer)
if code == 1:
chip_box, status = self.design.get_x_y_for_chip(chip_name)
if status == 0:
minx, miny, maxx, maxy = chip_box
cheesed = self.cheese_based_on_shape(
minx, miny, maxx, maxy, chip_name, chip_layer,
cheese_sub_layer, nocheese_sub_layer)
def cheese_based_on_shape(self, minx: float, miny: float, maxx: float,
maxy: float, chip_name: str, chip_layer: int,
cheese_sub_layer: int, nocheese_sub_layer: int):
"""Instantiate class to do cheesing.
Args:
minx (float): Chip minimum x location.
miny (float): Chip minimum y location.
maxx (float): Chip maximum x location.
maxy (float): chip maximum y location.
chip_name (str): User defined chip name.
chip_layer (int): Layer number for calculating the cheese.
cheese_sub_layer (int): User defined datatype, considered a sub-layer number for where to place the cheese output.
nocheese_sub_layer (int): User defined datatype, considered a sub-layer number for where to place the NO_cheese output.
"""
max_points = int(self.parse_value(self.options.max_points))
cheese_shape = int(self.parse_value(self.options.cheese.shape))
all_nocheese = self.chip_info[chip_name][chip_layer]['no_cheese']
all_nocheese_gds = self.chip_info[chip_name][chip_layer][
'no_cheese_gds']
delta_x = float(self.parse_value(self.options.cheese.delta_x))
delta_y = float(self.parse_value(self.options.cheese.delta_y))
edge_nocheese = float(
self.parse_value(self.options.cheese.edge_nocheese))
precision = float(self.parse_value(self.options.precision))
if cheese_shape == 0:
cheese_x = float(self.parse_value(self.options.cheese.cheese_0_x))
cheese_y = float(self.parse_value(self.options.cheese.cheese_0_y))
a_cheese = Cheesing(all_nocheese,
all_nocheese_gds,
self.lib,
minx,
miny,
maxx,
maxy,
chip_name,
edge_nocheese,
chip_layer,
cheese_sub_layer,
nocheese_sub_layer,
self.logger,
max_points,
precision,
cheese_shape=cheese_shape,
shape_0_x=cheese_x,
shape_0_y=cheese_y,
delta_x=delta_x,
delta_y=delta_y)
elif cheese_shape == 1:
cheese_radius = float(
self.parse_value(self.options.cheese.cheese_1_radius))
a_cheese = Cheesing(all_nocheese,
all_nocheese_gds,
self.lib,
minx,
miny,
maxx,
maxy,
chip_name,
edge_nocheese,
chip_layer,
cheese_sub_layer,
nocheese_sub_layer,
self.logger,
max_points,
precision,
cheese_shape=cheese_shape,
shape_1_radius=cheese_radius,
delta_x=delta_x,
delta_y=delta_y)
else:
self.logger.warning(
f'The cheese_shape={cheese_shape} is unknown in QGDSRenderer.')
a_cheese = None
if a_cheese is not None:
a_lib = a_cheese.apply_cheesing()
return
def populate_no_cheese(self):
"""Iterate through every chip and layer. If options choose to have either cheese or no-cheese,
a MultiPolygon is placed self.chip_info[chip_name][chip_layer]['no_cheese'].
If user selects to view the no-cheese, the method placed the cell with no-cheese
at f'NoCheese_{chip_name}_{chip_layer}_{sub_layer}'. The sub_layer is data_type and denoted
in the options.
"""
no_cheese_buffer = float(self.parse_value(
self.options.no_cheese.buffer))
sub_layer = int(self.parse_value(self.options.no_cheese.datatype))
lib = self.lib
for chip_name in self.chip_info:
layers_in_chip = self.design.qgeometry.get_all_unique_layers(
chip_name)
for chip_layer in layers_in_chip:
code = self.check_either_cheese(chip_name, chip_layer)
if code == 1 or code == 2 or code == 3:
if len(self.chip_info[chip_name][chip_layer]
['all_subtract_true']) != 0:
sub_df = self.chip_info[chip_name][chip_layer][
'all_subtract_true']
no_cheese_multipolygon = self.cheese_buffer_maker(
sub_df, chip_name, no_cheese_buffer)
if | |
iterate over
activate_list.append(k)
# Remove all loads that are started
for k in activate_list:
self.waiting_list.pop(k)
wait_length = len(self.waiting_list) # Number of waiting items
tries = 0
temp_waitlist = self.waiting_list.copy()
while ((self.current_power < self.threshold) and self.waiting_list):
# find the background node that should be turned on
node_id, node_details = self.find_least_slack(temp_waitlist)
print("Want to turn on " + str(node_id))
if((self.current_power + node_details['power']) < self.threshold):
print("Turn on " + str(node_id))
# Send activate msg to the background node
payload = json.dumps({'action':'approved'}).encode('utf-8')
self.sockets[node_id].send(payload)
# Update current power
self.current_power += node_details['power']
# Add it to the active list and remove it from waiting list
self.active_list[node_id] = {'id': node_id}
self.waiting_list.pop(node_id)
temp_waitlist.pop(node_id)
# Add it to background loads to be able to see active backgrounds
self.background_load[node_id] = node_details
else:
temp_waitlist.pop(node_id)
tries += 1
if (tries == wait_length):
break
else:
print("Uses to much power to enable background")
"""
Function that runs every hour in order to reset background loads.
"""
def reset_backgrounds(self):
# Loop through all background devices and reset the time
for k, v in self.background_list.items():
v['time'] = self.node_list[k]['time']
self.background_list.update({k: v})
# If we miss someone, should throw error or empty the list
if ((len(self.waiting_list) != 0) or (len(self.background_load) != 0)):
# Remove all background loads from active list
for k, v in self.background_list.items():
try:
self.active_list.pop(k)
self.current_power -= self.node_list[k]['power']
except:
continue
self.waiting_list.clear()
self.background_load.clear()
print("Opps! Missed to schedule some background loads")
# Add all reset items to the list again
for k, v in self.background_list.items():
self.waiting_list[k] = v
###########################################################################
# General Helper functions #
###########################################################################
"""
We want to change to the next block. Decrease the remaining
time of all background loads by 1 and check whether we should disconnect
a scheduled task for now. The scheduled task might be finished; otherwise
it will be started in another block.
"""
def decrease_time(self):
disconnect_list = []
# If it is a background task
if (self.background_load):
for k, v in self.background_load.items():
v['time'] = v['time'] - 1
# If time is 0, disconnect the device
if (v['time'] == 0):
print(str(k) + " is done for this hour")
# Send disconnect msg to the background node
payload = json.dumps({'action':'disconnect'}).encode('utf-8')
self.sockets[k].send(payload)
self.current_power -= v['power']
self.active_list.pop(k)
# add ids to a temporary list since you can't change the size of the list you iterate over
disconnect_list.append(k)
# Remove all loads that are done
for k in disconnect_list:
self.background_load.pop(k)
# If it is a deadline task
if (self.deadline_load):
# Get all scheduled task next hour
total_clocks = self.blocks_per_hour * 24
next_step = self.block_schedule[((self.clock + 1) % total_clocks)]
for node in self.block_schedule[self.clock]:
if ((node['id'] in self.active_list) and (node not in next_step)):
payload = json.dumps({'action':'disconnect'}).encode('utf-8')
self.sockets[node['id']].send(payload)
self.deadline_power -= self.node_list[node['id']]['power']
self.active_list.pop(node['id'])
self.deadline_load.pop(node['id'])
###########################################################################
# Communication and handle helpers #
###########################################################################
"""
Register a node. Save necessary information about it.
"""
def handle_register(self, payload):
# Add the node to the list of all nodes
print('Register from node: ' + str(payload['id']))
self.node_list[payload['id']] = payload['details'].copy()
id = payload['id']
# Check if the node is a background task
if (payload['details']['flexible'] == 1):
self.background_list[payload['id']] = payload['details']
self.waiting_list[payload['id']] = payload['details']
elif (payload['details']['flexible'] == 2):
print("Scheduable task")
return id
"""
Handle a request from a node. This might be a request
to schedule something with a deadline or an interactive
task that needs to be started right away.
"""
def handle_request(self, payload):
print('Request from node: ' + str(payload['id']))
id = payload['id']
# Get the tuple of details based on the requested node's id
details = self.node_list[payload['id']]
# Check which flexibility the node has
# Interactive load
if (details['flexible'] == 0):
print('Interactive load')
# Add the power to the total consumption
self.current_power += details['power']
# Add device to active list
self.active_list[payload['id']] = {'id': payload['id']}
# Send approval to the node
payload = json.dumps({'action':'approved'}).encode('utf-8')
self.sockets[id].send(payload)
# If the interactive load exceeds the limit, turn off background loads
if self.current_power > self.threshold:
# Continue until the current power is below the threshold; the worst case is
# when we have emergency loads, then we will break anyway
tmp_backgroundload = self.background_load.copy()
while ((self.current_power > self.threshold) and tmp_backgroundload):
# find the background node that should be turned off
node_id, node_details = self.find_least_slack(tmp_backgroundload)
# Stop if there aren't any background loads to disconnect
if (not node_id):
break
# If the time left equals the time the node requires, don't pause it
time_left = self.blocks_per_hour - (self.clock % 6) - 1
if (node_details['time'] == time_left):
tmp_backgroundload.pop(node_id)
continue
# Send disconnect msg to the background node
payload = json.dumps({'action':'disconnect'}).encode('utf-8')
self.sockets[node_id].send(payload)
# Remove it from the active list
self.active_list.pop(node_id)
self.background_load.pop(node_id)
tmp_backgroundload.pop(node_id)
# Add the device back to the waiting list
self.waiting_list[node_id] = node_details
# Decrease the power
self.current_power -= self.node_list[node_id]['power']
# Deadline task
elif (details['flexible'] == 2):
print('Schedulable')
deadline = details['deadline']
duration = details['time']
self.schedule_deadline_task(id, deadline, duration)
else:
raise Exception
"""
Disconnect a load. This should not happen except
for interactive loads.
"""
def handle_disconnect(self, payload):
print('Disconnect from node: ' + str(payload['id']))
id = payload['id']
self.active_list.pop(id)
payload = json.dumps({'action':'disconnect'}).encode('utf-8')
self.sockets[id].send(payload)
details = self.node_list[id]
self.current_power -= details['power']
"""
Update from a node. This is currently not used
from the node / load since we assume that loads
does not change during runtime.
"""
def handle_update(self, payload):
print('Update from node: ' + str(payload['id']))
"""
Helper function for receive
"""
def handle_recv(self, s):
try:
data = s.recv(1024)
except Exception as e:
return
if not data:
return
data = data.decode('utf-8')
try:
data = json.loads(data)
except Exception as e:
print (e)
return
return data
"""
Helper function to handle different incoming
actions. Send to the correct helper function for
that action.
"""
def handle_action(self, data):
action = data['action']
payload = data['payload']
# Request action
if (action == 'request'):
self.handle_request(payload)
# Update action
elif (action == 'update'):
self.handle_update(payload)
# Disconnect action
elif (action == 'disconnect'):
self.handle_disconnect(payload)
# Invalid, drop it
else:
print('Invalid action received')
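# Illustrative message format handled above (inferred from the json payloads used in
# this file; the field values are examples, not taken from a protocol spec):
#   {"action": "request",    "payload": {"id": 3}}
#   {"action": "update",     "payload": {"id": 3}}
#   {"action": "disconnect", "payload": {"id": 3}}
# Replies sent back to a node are {"action": "approved"} or {"action": "disconnect"}.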
###########################################################################
# Main #
###########################################################################
def main(self, plot):
# Set up the graph
if (plot):
plt.ion()
self.figure, self.axis = plt.subplots()
self.lines, = self.axis.plot([],[], 'r-', label="Watt")
self.axis.set_autoscaley_on(True)
self.axis.set_xlim(0, 144)
self.axis.set_ylim(0, 5000)
self.axis.set_xlabel('Blocks')
self.axis.set_ylabel('Watt')
self.axis.grid()
plt.legend()
# Lists in order to keep track of usage different hours
block_usage = []
blocks = []
while True:
if (plot):
if self.current_hour >= 0 and self.current_hour < 12:
blocks.append(self.clock + 72)
else:
blocks.append(self.clock - 72)
block_usage.append(self.current_power + self.deadline_power)
plt.pause(0.05)
#plt.plot(blocks, block_usage, zorder=1)
self.lines.set_xdata(blocks)
self.lines.set_ydata(block_usage)
self.axis.relim()
self.axis.autoscale_view()
self.figure.canvas.draw()
self.figure.canvas.flush_events()
# Print useful debugging information
print("======== New block ========")
print("Current power: " + str(self.current_power))
print("Deadline power: " + str (self.deadline_power))
print("Active list: " + str(self.active_list))
print("Background load: " + str(self.background_load))
print("Deadline load: " + str(self.deadline_load))
print("Waiting list: " + str(self.waiting_list))
print("Clock: " + str(self.clock))
print("Hour: " + str(self.current_hour))
# The scheduler for already scheduled tasks, check if some should be turned on
self.check_scheduled_tasks()
# Always decrease time when we executed one turn in the loop
self.decrease_time()
# Fetch current second
self.current_second = int(time.strftime('%S', time.gmtime()))
# The scheduler for the background loads
self.schedule_background((self.clock%self.blocks_per_hour))
# Wait here until next second
while(self.current_second == int(time.strftime('%S', time.gmtime()))):
# Check if the main socket has connection
readable, writable, errored = select.select([self.server_socket], [], [], 0)
for s in readable:
if s is self.server_socket:
client_socket, address = self.server_socket.accept()
data = client_socket.recv(1024)
if not data:
continue
data = data.decode('utf-8')
try:
data = json.loads(data)
except Exception as e:
print (e)
continue
# Set it up
# Might need to set up a much higher timeout here as well, AND in node.py sockets
client_socket.setblocking(0)
# Fetch the id and add it to the socket list
id = self.handle_register(data['payload'])
self.sockets[id] = client_socket
# Check if the other sockets have sent something to us
for s in self.sockets.values():
data = self.handle_recv(s)
if data:
self.handle_action(data)
else:
continue
time.sleep(0.2)
# Increase time
self.clock += 1
if (self.clock % self.blocks_per_hour == 0):
print("================== NEW | |
import Tools.HTML
if __name__ == "__main__":
import sys,os
append_path = os.path.abspath(sys.argv[0])[:-20]
print("Append to PYTHONPATH: %s" % (append_path))
sys.path.append(append_path)
import re
import copy
from Tools.file2 import file2
from Tools import ChemicalInfo
from Geometry import Scan,IRC,Geom,ListGeoms
from ElectronicStructure import ElectronicStructure
from Containers import AtomicProps
from Interface.NBO import NBO
import logging
log = logging.getLogger(__name__)
# TODO: take advantage of BetterFile
class Gaussian(ElectronicStructure):
"""
Gaussian 09 parser
Analyzes a multiple-step calculation
"""
def __init__(self):
"""
Declares steps (type List)
"""
self.steps = []
def parse(self):
"""
Parses Gaussian log file, step by step
"""
try:
FI = file2(self.file)
log.debug('%s was opened for reading' %(self.file))
except:
log.error('Cannot open %s for reading' %(self.file))
return
while True:
step = GauStep(FI)
step.parse()
step.postprocess()
if step.blank:
break
self.steps.append(step)
FI.close()
log.debug('%s parsed successfully' % (self.file))
return
def webdata(self):
"""
Returns 2 strings with HTML code
"""
we = self.settings.Engine3D()
b1,b2,bb1,bb2,i = '','','','',1
MaxGeoms, n_Freq = 0, 0
b1s = []
for step in self.steps:
MaxGeoms = max(MaxGeoms,len(step.geoms))
if step.vector:
n_Freq = i
self.settings.subcounter += 1
step.statfile = self.settings.real_path('.stat')
b1, b2 = step.webdata(StartApplet=False)
labeltext = '%s: %s' %(step.JobType,step.lot)
b1s.append([b1,labeltext.upper()])
bb2 += b2
i += 1
if b1s:
bb1 = we.JMolApplet(ExtraScript = b1s[n_Freq-1][0])
if MaxGeoms > 1:
bb1 += Tools.HTML.brn + we.html_geom_play_controls()
if n_Freq:
bb1 += Tools.HTML.brn + we.html_vibration_switch()
if len(b1s)>1:
bb1 += Tools.HTML.brn * 2
# add buttons for each step
for b1 in b1s:
bb1 += we.html_button(*b1)
log.debug('webdata generated successfully')
return bb1, bb2
def usage(self):
for step in self.steps:
step.usage()
class GauStep(ElectronicStructure):
"""
Works with a single calculation step
"""
def __init__(self,FI=None):
super().__init__(FI)
self.rc = {
'/' : re.compile('(\S*\/\S+)'),
'iop' : re.compile('iop\((.*?)\)'),
'scrf-solv': re.compile('scrf.*solvent\s*=\s*(\w+)',re.IGNORECASE),
's2' : re.compile(' S\*\*2 before annihilation\s+(\S+),.*?\s+(\S+)$'),
'nbo-bond' : re.compile('\) BD \(.*\s+(\S+)\s*-\s*\S+\s+(\S+)'),
'basis-fn' : re.compile('^ AtFile\(1\):\s+(.*?).gbs'),
'chk' : re.compile('^ %%chk\s*=\s*(\S+)'),
'charge-mult' : re.compile('^ Charge =\s+(\S+)\s+Multiplicity =\s+(\S+)'),
'scf done' : re.compile('^ SCF Done.*?=\s+(\S+)'),
'qcisd_t' : re.compile('^ QCISD\(T\)=\s*(\S+)'),
'scf_conv' : re.compile('^ E=\s*(\S+)'),
'scf_iter' : re.compile('^ Iteration\s+\S+\s+EE=\s*(\S+)'),
'ci_cc_conv' : re.compile('^ DE\(Corr\)=\s*\S+\s*E\(CORR\)=\s*(\S+)'),
'xyz' : re.compile('^\s+\S+\s+(\S+).*\s+(\S+)\s+(\S+)\s+(\S+)\s*$'),
'scan param' : re.compile('^ !\s+(\S+)\s+(\S+)\s+(\S+)\s+Scan\s+!$'),
'frozen' : re.compile('^ !\s+(\S+)\s+(\S+)\s+\S+\s+frozen.*!$',re.IGNORECASE),
'alnum' : re.compile('[a-zA-Z]'),
'ifreq' : re.compile('\d+\s+\d+\s+(\S+)\s+(\S+)\s+(\S+)'),
'excited state' : re.compile('^ Excited State\s+(.*?):.*?\s+(\S+)\s*nm f=\s*(\S+)'),
'scan' : re.compile('Scan\s+!$')
}
self.chash = {}
self.chash['NPA'] = {'Entry': 'XXX-XXX', 'Stop': 'XXX-XXX'}
self.chash['NPA_spin'] = {'Entry': 'XXX-XXX', 'Stop': 'XXX-XXX'}
self.chash['APT'] = {'Entry' : 'APT atomic charges:', 'Stop' : 'Sum of APT' }
self.chash['Mulliken'] = {'Entry' : 'Mulliken atomic charges:', 'Stop' : 'Sum of Mulliken' }
self.lot_nobasis = (
'cbs-qb3','cbs-4m','cbs-apno',
'g1', 'g2', 'g2mp2', 'g3', 'g3mp2', 'g3b3', 'g3mp2b3', 'g4', 'g4mp2', 'g3mp2b3',
'w1u', 'w1bd', 'w1ro',
'b1b95', 'b1lyp', 'b3lyp', 'b3p86', 'b3pw91', 'b95', 'b971', 'b972', 'b97d', 'b98', 'bhandh', 'bhandhlyp', 'bmk', 'brc', 'brx', 'cam-b3lyp', 'g96', 'hcth', 'hcth147', 'hcth407', 'hcth93', 'hfb', 'hfs', 'hse2pbe', 'hseh1pbe', 'hsehpbe', 'kcis', 'lc-wpbe', 'lyp', 'm06', 'm062x', 'm06hf', 'm06l', 'o3lyp', 'p86', 'pbe', 'pbe', 'pbe1pbe', 'pbeh', 'pbeh1pbe', 'pkzb', 'pkzb', 'pw91', 'pw91', 'tpss', 'tpssh', 'v5lyp', 'vp86', 'vsxc', 'vwn', 'vwn5', 'x3lyp', 'xa', 'xalpha', 'mpw', 'mpw1lyp', 'mpw1pbe', 'mpw1pw91', 'mpw3pbe', 'thcth', 'thcthhyb', 'wb97', 'wb97x', 'wb97xd', 'wpbeh',
'mp2', 'mp3', 'mp4', 'mp5', 'b2plyp', 'mpw2plyp',
'ccd','ccsd','ccsd(t)','cid','cisd','qcisd(t)','sac-ci',
'am1','pm3','pm6','cndo','dftba','dftb','zindo','indo',
'amber','dreiding','uff',
'rhf','uhf','hf','casscf','gvb',
)
self.def_basis = (
'3-21g', '6-21g', '4-31g', '6-31g', '6-311g',
'd95v', 'd95', 'shc',
'cep-4g', 'cep-31g', 'cep-121g',
'lanl2mb', 'lanl2dz', 'sdd', 'sddall',
'cc-pvdz', 'cc-pvtz', 'cc-pvqz', 'cc-pv5z', 'cc-pv6z',
'svp', 'sv', 'tzvp', 'tzv', 'qzvp',
'midix', 'epr-ii', 'epr-iii', 'ugbs', 'mtsmall',
'dgdzvp', 'dgdzvp2', 'dgtzvp', 'cbsb7',
'gen','chkbasis',
)
self.irc_direction, self.irc_both = 1, False
self.all_coords = {}
# ------- Helper functions --------
@staticmethod
def inroute(lst,s,add=False):
result = ''
for si in lst:
for sj in s.split():
if si.lower()==sj.lower() or ('u'+si.lower())==sj.lower() or ('r'+si.lower())==sj.lower():
if add:
result += ' '+si
else:
return si
return result
#
@staticmethod
def floatize(x):
if '****' in x:
return 10.
return float(x)
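# Illustrative note (not in the original parser): floatize() guards against
# Gaussian's overflow markers, e.g. floatize('****') -> 10.0 while
# floatize('1.5') -> 1.5; inroute() matches route keywords with optional
# 'r'/'u' prefixes (e.g. 'b3lyp', 'ub3lyp' and 'rb3lyp' all match 'b3lyp').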
# //----- Helper functions --------
def parse(self):
"""
Actual parsing happens here
"""
t_ifreq_done = False
basis_FN = ''
rc = self.rc
s = 'BLANK' # It has to be initialized!
try:
while True:
next(self.FI)
s = self.FI.s.rstrip()
#
# Try to save some time by skipping parsing of large noninformative blocks of output
#
# Does not work for AM1 calcs
"""
# Skip parsing of SCF iterations
if s.find(' Cycle')==0:
while not s == '':
s = next(self.FI).rstrip()
"""
# Skip parsing of distance matrices
if s.find('Distance matrix (angstroms):')==20:
n = len(self.all_coords[coord_type]['all'][-1])
#print('n=',n)
a1 = n % 5
an = n
num = int((an-a1)/5) + 1
n_lines_to_skip = num * (a1 + an) / 2
if a1==0:
num -= 1
n_lines_to_skip += num * (1+num) / 2
self.FI.skip_n(int(n_lines_to_skip))
s = self.FI.s.rstrip()
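# Worked example for the skip arithmetic above (illustrative only): with n = 12
# atoms, a1 = 12 % 5 = 2, num = (12 - 2)/5 + 1 = 3, so 3*(2 + 12)/2 = 21 lines of
# the lower-triangular distance matrix are skipped; the a1 == 0 branch corrects
# the count when the atom count is a multiple of 5.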
#
# ---------------------------------------- Read in cartesian coordinates ----------------------------------
#
# Have we found coords?
enter_coord = False
if ' orientation:' in s:
coord_type = s.split()[0]
enter_coord = True
if s.find(' Cartesian Coordinates (Ang):')==0:
coord_type = 'Cartesian Coordinates (Ang)'
enter_coord = True
# If yes, then read them
if enter_coord:
# Positioning
dashes1 = next(self.FI)
title1 = next(self.FI)
title2 = next(self.FI)
dashes2 = next(self.FI)
s = next(self.FI)
# Read in coordinates
geom = Geom()
atnames = []
while not '-------' in s:
xyz = s.strip().split()
try:
ati, x,y,z = xyz[1], xyz[-3],xyz[-2],xyz[-1]
except:
log.warning('Error reading coordinates:\n%s' % (s))
break
atn = ChemicalInfo.at_name[int(ati)]
atnames.append(atn)
geom.coord.append('%s %s %s %s' % (atn,x,y,z))
s = next(self.FI)
# Add found coordinate to output
pc = AtomicProps(attr='atnames',data=atnames)
geom.addAtProp(pc,visible=False) # We hide it because there is no use in showing atomic names for each geometry via checkboxes
if not coord_type in self.all_coords:
self.all_coords[coord_type] = {'all':ListGeoms(),'special':ListGeoms()}
self.all_coords[coord_type]['all'].geoms.append(geom)
#
# ------------------------------------------- Route lines -------------------------------------------------
#
if s.find(' #')==0:
# Read all route lines
s2 = s
while not '-----' in s2:
self.route_lines += ' ' + s2[1:]
s2 = next(self.FI).rstrip()
self.route_lines = self.route_lines.lower()
self.iop = rc['iop'].findall(self.route_lines)
self.route_lines = re.sub('iop\(.*?\)','',self.route_lines) # Quick and dirty: strip the iop(...) directives
# Get Level of Theory
# Look for standard notation: Method/Basis
lot = rc['/'].search(self.route_lines)
# print self.route_lines
if lot:
self.lot, self.basis = lot.group(1).split('/')
if self.basis == 'gen' and basis_FN: # Read basis from external file
self.basis = basis_FN
else:
# Look for method and basis separately using predefined lists of standard methods and bases
lt = self.inroute(self.lot_nobasis,self.route_lines)
if lt:
self.lot = lt
bs = self.inroute(self.def_basis,self.route_lines)
if bs:
self.basis = bs
# Extract %HF in non-standard functionals
for iop in self.iop:
if '3/76' in iop:
encrypted_hf = iop.split('=')[1]
str_hf = encrypted_hf[-5:]
num_hf = float(str_hf[:3]+'.'+str_hf[3:])
self.lot_suffix += '(%.2f %%HF)' %(num_hf)
# Read solvent info
if 'scrf' in self.route_lines:
solvent = rc['scrf-solv'].search(self.route_lines)
if solvent:
self.solvent = solvent.group(1)
# Get job type from the route line
self.route_lines = re.sub('\(.*?\)','',self.route_lines) # Quick and dirty: get rid of parentheses to get a string with only top level commands
self.route_lines = re.sub('=\S*','',self.route_lines) # Quick and dirty: get rid of =... to get a string with only top level commands
jt = self.inroute(('opt','freq','irc'),self.route_lines) # Major job types
if jt:
self.JobType = jt
#print('self.route_lines: ',self.route_lines)
#print('jt',jt)
self.JobType += self.inroute(('td','nmr','stable'),self.route_lines,add=True) # Additional job types
# Recognize job type on the fly
if ' Berny optimization' in s and self.JobType=='sp':
self.JobType = 'opt'
if rc['scan'].search(s):
self.JobType = 'scan'
#
# ---------------------------------------- Read archive section -------------------------------------------
#
if 'l9999.exe' in s and 'Enter' in s:
while not '@' in self.l9999:
s2 = next(self.FI).strip()
if s2=='':
continue
self.l9999 += s2
#print self.l9999
la = self.l9999.replace('\n ','').split('\\')
if len(la)>5:
self.machine_name = la[2]
if la[5]:
self.basis = la[5]
#basis = la[5]
#if basis == 'gen':
#if basis_FN:
#self.basis = ' Basis(?): ' + basis_FN
#elif not self.basis:
#self.basis = ' Basis: n/a'
self.lot = la[4]
self.JobType9999 = la[3]
if self.JobType != self.JobType9999.lower():
self.JobType += "(%s)" % (self.JobType9999.lower())
#
# ---------------------------------------- Read simple values ---------------------------------------------
#
#Nproc
if s.find(' Will use up to') == 0:
self.n_cores = s.split()[4]
# time
if s.find(' Job cpu time:') == 0:
s_splitted = s.split()
try:
n_days = float(s_splitted[3])
n_hours = float(s_splitted[5])
n_mins = float(s_splitted[7])
n_sec = float(s_splitted[9])
self.time = n_days*24 + n_hours + n_mins/60 + n_sec/3600
except:
self.time = '***'
# n_atoms
if s.find('NAtoms=') == 1:
s_splitted = s.split()
self.n_atoms = int(s_splitted[1])
# n_basis
if s.find('basis functions') == 7:
s_splitted = s.split()
self.n_primitives = int(s_splitted[3])
# Basis
if s.find('Standard basis:') == 1:
self.basis = s.strip().split(':')[1]
# n_electrons
if s.find('alpha electrons') == 7:
s_splitted = s.split()
| |
lim_switch : int
forward limit switch or reverse limit switch:
- HOMELIMSW_FWD = 4 : Use forward limit switch for home datum.
- HOMELIMSW_REV = 1 : Use reverse limit switch for home datum.
velocity : float
velocity of the motor
zero_offset : float
zero offset
"""
err_code = _lib.MOT_SetHomeParams(self._serial_number, direction,
lim_switch, velocity, zero_offset)
if (err_code != 0):
raise Exception("Setting move home parameters failed: %s" %
_get_error_text(err_code))
move_home_direction = __property_from_index(0,
get_move_home_parameters, set_move_home_parameters)
"""Homing direction (Forward 1, Reverse 2)."""
move_home_lim_switch = __property_from_index(1,
get_move_home_parameters, set_move_home_parameters)
"""Home limit switch (forward 4, reverse 1)."""
move_home_velocity = __property_from_index(2,
get_move_home_parameters, set_move_home_parameters)
"""Homing velocity."""
move_home_zero_offset = __property_from_index(3,
get_move_home_parameters, set_move_home_parameters)
"""Homing zero offset"""
def get_motor_parameters(self):
"""
Returns motor parameters.
Returns
-------
out : tuple
(steps per revolution, gear box ratio)
"""
steps_per_rev = ctypes.c_long()
gear_box_ratio = ctypes.c_long()
err_code = _lib.MOT_GetMotorParams(self._serial_number,
ctypes.byref(steps_per_rev),
ctypes.byref(gear_box_ratio))
if (err_code != 0):
raise Exception("Failed getting motor parameters: %s" %
_get_error_text(err_code))
else:
return (steps_per_rev.value, gear_box_ratio.value)
def set_motor_parameters(self, steps_per_rev, gear_box_ratio):
"""
Sets motor parameters. Note that this is not possible with all motors,
see documentation from Thorlabs.
Parameters
----------
steps_per_rev : int
steps per revolution
gear_box_ratio : int
gear box ratio
"""
err_code = _lib.MOT_SetMotorParams(self._serial_number, steps_per_rev,
gear_box_ratio)
if (err_code != 0):
raise Exception("Setting motor parameters failed: %s" %
_get_error_text(err_code))
steps_per_revolution = __property_from_index(0, get_motor_parameters,
set_motor_parameters)
"""Motor parameter: Steps per revolution"""
gear_box_ratio = __property_from_index(1, get_motor_parameters,
set_motor_parameters)
"""Motor parameter: Gear box ratio"""
@property
def backlash_distance(self):
"""
Backlash distance.
"""
backlash = ctypes.c_float()
err_code = _lib.MOT_GetBLashDist(self._serial_number,
ctypes.byref(backlash))
if (err_code != 0):
raise Exception("Failed getting backlash distance: %s" %
_get_error_text(err_code))
else:
return backlash.value
@backlash_distance.setter
def backlash_distance(self, value):
err_code = _lib.MOT_SetBLashDist(self._serial_number, value)
if (err_code != 0):
raise Exception("Setting backlash distance failed: %s" %
_get_error_text(err_code))
def get_stage_axis_info(self):
"""
Returns axis information of stage.
Returns
-------
out : tuple
(minimum position, maximum position, stage units, pitch)
- STAGE_UNITS_MM = 1 : Stage units in mm
- STAGE_UNITS_DEG = 2 : Stage units in degrees
"""
min_pos = ctypes.c_float()
max_pos = ctypes.c_float()
units = ctypes.c_long()
pitch = ctypes.c_float()
err_code = _lib.MOT_GetStageAxisInfo(self._serial_number,
ctypes.byref(min_pos),
ctypes.byref(max_pos),
ctypes.byref(units),
ctypes.byref(pitch))
if (err_code != 0):
raise Exception("Failed getting stage axis information: %s" %
_get_error_text(err_code))
return (min_pos.value, max_pos.value, units.value, pitch.value)
def set_stage_axis_info(self, min_pos, max_pos, units, pitch):
"""
Sets axis information of stage.
Parameters
----------
min_pos : float
minimum position
max_pos : float
maximum position
units : int
stage units:
- STAGE_UNITS_MM = 1 : Stage units in mm
- STAGE_UNITS_DEG = 2 : Stage units in degrees
pitch : float
pitch
"""
err_code = _lib.MOT_SetStageAxisInfo(self._serial_number,
min_pos, max_pos, units, pitch)
if (err_code != 0):
raise Exception("Setting stage axis info failed: %s" %
_get_error_text(err_code))
minimum_position = __property_from_index(0, get_stage_axis_info,
set_stage_axis_info)
"""Stage's minimum position"""
maximum_position = __property_from_index(1, get_stage_axis_info,
set_stage_axis_info)
"""Stage's maximum position"""
units = __property_from_index(2, get_stage_axis_info,
set_stage_axis_info)
"""Stage's units"""
pitch = __property_from_index(3, get_stage_axis_info,
set_stage_axis_info)
"""Stage's pitch"""
def get_hardware_limit_switches(self):
"""
Returns hardware limit switch modes for reverse and forward direction.
Returns
-------
out : tuple
(reverse limit switch, forward limit switch)
HWLIMSWITCH_IGNORE = 1 : Ignore limit switch (e.g. for stages
with only one or no limit switches).
HWLIMSWITCH_MAKES = 2 : Limit switch is activated when electrical
continuity is detected.
HWLIMSWITCH_BREAKS = 3 : Limit switch is activated when electrical
continuity is broken.
HWLIMSWITCH_MAKES_HOMEONLY = 4 : As per HWLIMSWITCH_MAKES except
switch is ignored other than when homing (e.g. to support
rotation stages).
HWLIMSWITCH_BREAKS_HOMEONLY = 5 : As per HWLIMSWITCH_BREAKS except
switch is ignored other than when homing (e.g. to support
rotation stages).
See also
--------
set_hardware_limit_switches
"""
rev = ctypes.c_long()
fwd = ctypes.c_long()
err_code = _lib.MOT_GetHWLimSwitches(self._serial_number,
ctypes.byref(rev), ctypes.byref(fwd))
if (err_code != 0):
raise Exception("Getting hardware limit switches failed: %s" %
_get_error_text(err_code))
return (rev.value, fwd.value)
def set_hardware_limit_switches(self, rev, fwd):
"""
Sets hardware limit switches for reverse and forward direction.
HWLIMSWITCH_IGNORE = 1 : Ignore limit switch (e.g. for stages
with only one or no limit switches).
HWLIMSWITCH_MAKES = 2 : Limit switch is activated when electrical
continuity is detected.
HWLIMSWITCH_BREAKS = 3 : Limit switch is activated when electrical
continuity is broken.
HWLIMSWITCH_MAKES_HOMEONLY = 4 : As per HWLIMSWITCH_MAKES except
switch is ignored other than when homing (e.g. to support
rotation stages).
HWLIMSWITCH_BREAKS_HOMEONLY = 5 : As per HWLIMSWITCH_BREAKS except
switch is ignored other than when homing (e.g. to support
rotation stages).
Parameters
----------
rev : int
reverse limit switch
fwd : int
forward limit switch
"""
err_code = _lib.MOT_SetHWLimSwitches(self._serial_number, rev, fwd)
if (err_code != 0):
raise Exception("Setting hardware limit switches failed: %s" %
_get_error_text(err_code))
reverse_limit_switch = __property_from_index(0,
get_hardware_limit_switches,
set_hardware_limit_switches)
"""Reverse limit switch"""
forward_limit_switch = __property_from_index(1,
get_hardware_limit_switches,
set_hardware_limit_switches)
"""Forward limit switch"""
def get_pid_parameters(self):
"""
Returns PID parameters.
Returns
-------
out : tuple
(proportional, integrator, differentiator, integrator limit)
"""
proportional = ctypes.c_long()
integrator = ctypes.c_long()
differentiator = ctypes.c_long()
integrator_limit = ctypes.c_long()
err_code = _lib.MOT_GetPIDParams(self._serial_number,
ctypes.byref(proportional),
ctypes.byref(integrator),
ctypes.byref(differentiator),
ctypes.byref(integrator_limit))
if (err_code != 0):
raise Exception("Getting PID parameters failed: %s" %
_get_error_text(err_code))
return (proportional.value, integrator.value, differentiator.value,
integrator_limit.value)
def set_pid_parameters(self, proportional, integrator, differentiator,
integrator_limit):
"""
Sets PID parameters.
Parameters
----------
proportional : int
integrator : int
differentiator : int
integrator_limit : int
"""
err_code = _lib.MOT_SetPIDParams(self._serial_number, proportional,
integrator, differentiator, integrator_limit)
if (err_code != 0):
raise Exception("Setting PID parameters failed: %s" %
_get_error_text(err_code))
pid_proportional = __property_from_index(0, get_pid_parameters,
set_pid_parameters)
"""PID controller: Proportional"""
pid_integrator = __property_from_index(1, get_pid_parameters,
set_pid_parameters)
"""PID controller: integrator"""
pid_differentiator = __property_from_index(2, get_pid_parameters,
set_pid_parameters)
"""PID controller: Differentiator term"""
pid_integrator_limit = __property_from_index(3, get_pid_parameters,
set_pid_parameters)
"""PID controller: Integrator limit"""
def move_to(self, value, blocking = False):
"""
Move to absolute position.
Parameters
----------
value : float
absolute position of the motor
blocking : bool
wait until moving is finished.
Default: False
"""
err_code = _lib.MOT_MoveAbsoluteEx(self._serial_number, value,
blocking)
if (err_code != 0):
raise Exception("Setting absolute position failed: %s" %
_get_error_text(err_code))
def move_by(self, value, blocking = False):
"""
Move relative to current position.
Parameters
----------
value : float
relative distance
blocking : bool
wait until moving is finished
Default: False
"""
err_code = _lib.MOT_MoveRelativeEx(self._serial_number, value,
blocking)
if (err_code != 0):
raise Exception("Setting relative position failed: %s" %
_get_error_text(err_code))
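# Illustrative usage sketch (assumes a motor instance constructed elsewhere, e.g.
# motor = Motor(serial_number); the name Motor is an assumption, not confirmed here):
#   motor.move_home(blocking=True)      # home first so absolute positions are meaningful
#   motor.move_to(5.0, blocking=True)   # absolute move, in stage units
#   motor.move_by(-1.25)                # relative move, non-blocking
#   print(motor.position)               # read back the current position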
@property
def position(self):
"""
Position of motor. Setting the position is absolute and non-blocking.
"""
pos = ctypes.c_float()
err_code = _lib.MOT_GetPosition(self._serial_number,
ctypes.byref(pos))
if (err_code != 0):
raise Exception("Getting position failed: %s" %
_get_error_text(err_code))
return pos.value
@position.setter
def position(self, value):
self.move_to(value, False)
def move_home(self, blocking = False):
"""
Move to home position.
Parameters
----------
blocking : bool
wait until homed
Default: False
"""
err_code = _lib.MOT_MoveHome(self._serial_number, blocking)
if (err_code != 0):
raise Exception("Moving velocity failed: %s" %
_get_error_text(err_code))
def move_velocity(self, direction):
"""
Parameters
----------
direction : int
MOVE_FWD = 1 : Move forward
MOVE_REV = 2 : Move reverse
"""
err_code = _lib.MOT_MoveVelocity(self._serial_number, direction)
if (err_code != 0):
raise Exception("Moving velocity failed: %s" %
_get_error_text(err_code))
def stop_profiled(self):
"""
Stop motor but turn down velocity slowly (profiled).
"""
err_code = _lib.MOT_StopProfiled(self._serial_number)
if (err_code != 0):
raise Exception("Stop profiled failed: %s" %
_get_error_text(err_code))
def get_dc_current_loop_parameters(self):
"""
Returns DC current loop parameters.
Returns
-------
out : tuple
(proportional, integrator, integrator_limit, integrator_dead_band,
fast_forward)
"""
proportional = ctypes.c_long()
integrator = ctypes.c_long()
integrator_limit = ctypes.c_long()
integrator_dead_band = ctypes.c_long()
fast_forward = ctypes.c_long()
err_code = _lib.MOT_GetDCCurrentLoopParams(self._serial_number,
ctypes.byref(proportional),
ctypes.byref(integrator),
ctypes.byref(integrator_limit),
ctypes.byref(integrator_dead_band),
ctypes.byref(fast_forward))
if (err_code != 0):
raise Exception("Getting DC current loop parameters failed: %s" %
_get_error_text(err_code))
return (proportional.value, integrator.value, integrator_limit.value,
integrator_dead_band.value, fast_forward.value)
def set_dc_current_loop_parameters(self, proportional, integrator,
integrator_limit, integrator_dead_band, fast_forward):
"""
Sets DC current loop parameters.
Parameters
----------
proportional : int
integrator : int
integrator_limit : int
integrator_dead_band : int
fast_forward : int
"""
err_code = _lib.MOT_SetDCCurrentLoopParams(self._serial_number,
proportional, integrator, integrator_limit,
integrator_dead_band, fast_forward)
if (err_code != 0):
raise Exception("Setting DC current loop parameters failed: %s" %
_get_error_text(err_code))
dc_current_loop_proportional = __property_from_index(0,
get_dc_current_loop_parameters,
set_dc_current_loop_parameters)
"""DC current loop: proportional term"""
dc_current_loop_integrator = __property_from_index(1,
get_dc_current_loop_parameters,
set_dc_current_loop_parameters)
"""DC current loop: integrator term"""
dc_current_loop_integrator_limit = __property_from_index(2,
get_dc_current_loop_parameters,
set_dc_current_loop_parameters)
"""DC current loop: integrator limit"""
dc_current_loop_integrator_dead_band = __property_from_index(3,
get_dc_current_loop_parameters,
set_dc_current_loop_parameters)
"""DC current loop: integrator dead band"""
dc_current_loop_fast_forward = | |
# -*- coding: utf-8 -*-
from __future__ import annotations
import json
from contextlib import suppress
from time import perf_counter
from time import sleep
from typing import Callable
from typing import Optional
import click
from pioreactor import error_codes
from pioreactor import exc
from pioreactor import hardware
from pioreactor import structs
from pioreactor.background_jobs.base import BackgroundJob
from pioreactor.config import config
from pioreactor.utils import clamp
from pioreactor.utils import local_persistant_storage
from pioreactor.utils.gpio_helpers import GPIO_states
from pioreactor.utils.gpio_helpers import set_gpio_availability
from pioreactor.utils.pwm import PWM
from pioreactor.utils.streaming_calculations import PID
from pioreactor.utils.timing import current_utc_timestamp
from pioreactor.utils.timing import RepeatedTimer
from pioreactor.whoami import get_latest_experiment_name
from pioreactor.whoami import get_unit_name
class RpmCalculator:
"""
Super class for determining how to calculate the RPM from the hall sensor.
We do some funky things with RPi.GPIO here.
1) to minimize global imports, we import RPi.GPIO inside `setup()` and attach the module to self.
2) More egregious: we previously had this class call `add_event_detect` and afterwards `remove_event_detect`
in each __call__ - this made sure that we were saving CPU resources when we were not measuring the RPM.
This was causing `Bus error`, and crashing Python. What I think was happening was that the folder
`/sys/class/gpio/gpio25` was constantly being written and deleted in each __call__, causing problems with the
SD card. Anyways, what we do now is turn the pin from IN to OUT in between the calls to RPM measurement. This
is taken care of in `turn_{on,off}_collection`. Flipping this only writes to `/sys/class/gpio/gpio25/direction` once.
Examples
-----------
> rpm_calc = RpmCalculator()
> rpm_calc.setup()
> rpm_calc(seconds_to_observe=1.5)
"""
hall_sensor_pin = hardware.HALL_SENSOR_PIN
def __init__(self) -> None:
pass
def setup(self) -> None:
# we delay the setup so that when all other checks are done (like in stirring's uniqueness), we can start to
# use the GPIO for this.
set_gpio_availability(self.hall_sensor_pin, GPIO_states.GPIO_UNAVAILABLE)
import RPi.GPIO as GPIO # type: ignore
self.GPIO = GPIO
self.GPIO.setmode(self.GPIO.BCM)
self.GPIO.setup(self.hall_sensor_pin, self.GPIO.IN, pull_up_down=self.GPIO.PUD_UP)
# ignore any changes that occur within 15ms - at 1000rpm (very fast), the
# delta between changes is ~60ms, so 15ms is good enough.
self.GPIO.add_event_detect(
self.hall_sensor_pin, self.GPIO.FALLING, callback=self.callback, bouncetime=15
)
self.turn_off_collection()
def turn_off_collection(self) -> None:
self.collecting = False
self.GPIO.setup(self.hall_sensor_pin, self.GPIO.OUT)
def turn_on_collection(self) -> None:
self.collecting = True
self.GPIO.setup(self.hall_sensor_pin, self.GPIO.IN, pull_up_down=self.GPIO.PUD_UP)
def cleanup(self) -> None:
self.GPIO.cleanup(self.hall_sensor_pin)
set_gpio_availability(self.hall_sensor_pin, GPIO_states.GPIO_AVAILABLE)
def __call__(self, seconds_to_observe: float) -> float:
return 0.0
def callback(self, *args) -> None:
pass
def sleep_for(self, seconds) -> None:
sleep(seconds)
def __enter__(self) -> RpmCalculator:
return self
def __exit__(self, *args) -> None:
self.cleanup()
class RpmFromFrequency(RpmCalculator):
"""
Averages the duration between rises in an N second window. This is more accurate (but less robust)
than RpmFromCount
"""
_running_sum = 0
_running_count = 0
_start_time = None
def callback(self, *args) -> None:
if not self.collecting:
return
obs_time = perf_counter()
if self._start_time is not None:
self._running_sum += obs_time - self._start_time
self._running_count += 1
self._start_time = obs_time
def clear_aggregates(self) -> None:
self._running_sum = 0
self._running_count = 0
self._start_time = None
def __call__(self, seconds_to_observe: float) -> float:
self.clear_aggregates()
self.turn_on_collection()
self.sleep_for(seconds_to_observe)
self.turn_off_collection()
if self._running_sum == 0:
return 0
else:
# we should be able to detect if we are missing pings, as they would be a near integer
# of the average of "okay" values.
# measured Δ time vs index
# |
# Δ |
# | x x
# |xxxxxxxxxxxxxxxxxxxx
# --------------------|
# index
#
# however, we need the array, but we only record the running counts/sums
# solution: running max and running min.
# False positives are when the duty cycle significantly changes however...
# if self._running_max > 1.75 * self._running_min:
# self.logger.debug(
# f"RpmCalculator is possible skipping some signal: {self._running_max=}, {self._running_min=}."
# )
return self._running_count * 60 / self._running_sum
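# Worked example (illustrative numbers, assuming one hall-sensor pulse per revolution): if 10
# inter-pulse intervals are accumulated and their durations sum to 1.2 s, the average period is
# 0.12 s, so the estimate is 10 * 60 / 1.2 = 500 RPM.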
class RpmFromCount(RpmCalculator):
"""
Counts the number of rises in an N second window.
"""
_rpm_counter = 0
def callback(self, *args) -> None:
self._rpm_counter = self._rpm_counter + 1
def __call__(self, seconds_to_observe: float) -> float:
self._rpm_counter = 0
self.turn_on_collection()
self.sleep_for(seconds_to_observe)
self.turn_off_collection()
return self._rpm_counter * 60 / seconds_to_observe
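# Worked example (illustrative numbers, same one-pulse-per-revolution assumption): 20 falling edges
# counted over a 2 s observation window gives 20 * 60 / 2 = 600 RPM.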
class Stirrer(BackgroundJob):
"""
Parameters
------------
target_rpm: float
Send message to "pioreactor/{unit}/{experiment}/stirring/target_rpm/set" to change the stirring speed.
rpm_calculator: RpmCalculator
See RpmCalculator and examples below.
Notes
-------
To create a feedback loop between the duty-cycle level and the RPM, we set up a polling algorithm. We set up
an edge detector on the hall sensor pin, and count the number of pulses in N seconds. We convert this count to RPM, and
then use a PID system to update the amount of duty cycle to apply.
We perform the above every N seconds. That is, there is a PID controller that checks every N seconds and nudges the duty cycle
to match the requested RPM.
Examples
---------
> st = Stirrer(500, unit, experiment)
> st.start_stirring()
"""
published_settings = {
"target_rpm": {"datatype": "float", "settable": True, "unit": "RPM"},
"measured_rpm": {"datatype": "MeasuredRPM", "settable": False, "unit": "RPM"},
"duty_cycle": {"datatype": "float", "settable": True, "unit": "%"},
}
_previous_duty_cycle: float = 0
duty_cycle: float = config.getfloat(
"stirring", "initial_duty_cycle"
) # only used if calibration isn't defined.
_measured_rpm: Optional[float] = None
def __init__(
self,
target_rpm: float,
unit: str,
experiment: str,
rpm_calculator: Optional[RpmCalculator],
hertz: float = config.getfloat("stirring", "pwm_hz"),
) -> None:
super(Stirrer, self).__init__(job_name="stirring", unit=unit, experiment=experiment)
self.logger.debug(f"Starting stirring with initial {target_rpm} RPM.")
self.rpm_calculator = rpm_calculator
if not hardware.is_HAT_present():
self.logger.error("Pioreactor HAT must be present.")
self.clean_up()
raise exc.HardwareNotFoundError("Pioreactor HAT must be present.")
if (self.rpm_calculator is not None) and not hardware.is_heating_pcb_present():
self.logger.error("Heating PCB must be present to measure RPM.")
self.clean_up()
raise exc.HardwareNotFoundError("Heating PCB must be present to measure RPM.")
if self.rpm_calculator is not None:
self.rpm_calculator.setup()
pin = hardware.PWM_TO_PIN[config.get("PWM_reverse", "stirring")]
self.pwm = PWM(pin, hertz, unit=unit, experiment=experiment)
self.pwm.lock()
self.target_rpm = target_rpm
self.rpm_to_dc_lookup = self.initialize_rpm_to_dc_lookup(self.target_rpm)
self.duty_cycle = self.rpm_to_dc_lookup(self.target_rpm)
# set up PID
self.pid = PID(
Kp=config.getfloat("stirring.pid", "Kp"),
Ki=config.getfloat("stirring.pid", "Ki"),
Kd=config.getfloat("stirring.pid", "Kd"),
setpoint=self.target_rpm,
unit=self.unit,
experiment=self.experiment,
job_name=self.job_name,
target_name="rpm",
output_limits=(-15, 15), # avoid whiplashing
)
# set up thread to periodically check the rpm
self.rpm_check_repeated_thread = RepeatedTimer(
31,
self.poll_and_update_dc,
job_name=self.job_name,
run_immediately=True,
run_after=5,
poll_for_seconds=4, # technically should be a function of the RPM: lower RPM, longer to get sufficient estimate with low variance.
)
def initialize_rpm_to_dc_lookup(self, target_rpm: float) -> Callable:
if self.rpm_calculator is None:
# if we can't track RPM, no point in adjusting DC, use what is in config.ini
return lambda rpm: self.duty_cycle
with local_persistant_storage("stirring_calibration") as cache:
if "linear_v1" in cache:
parameters = json.loads(cache["linear_v1"])
coef = parameters["rpm_coef"]
intercept = parameters["intercept"]
# since we have calibration data, and the initial_duty_cycle could be
# far off (giving the equation below a bad "first step"), we set it here.
self.duty_cycle = coef * target_rpm + intercept
# we scale this by 90% to make sure the PID + prediction doesn't overshoot,
# better to be conservative here.
# equivalent to a weighted average: 0.1 * current + 0.9 * predicted
return lambda rpm: self.duty_cycle - 0.90 * (
self.duty_cycle - (coef * rpm + intercept)
)
else:
return lambda rpm: self.duty_cycle
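# Worked example (hypothetical calibration values, not from config): with coef = 0.03 and
# intercept = 5.0, a request for 500 RPM predicts a duty cycle of 0.03 * 500 + 5.0 = 20%. If the
# current duty cycle is 30%, the lookup returns 30 - 0.90 * (30 - 20) = 21%, i.e. the weighted
# average 0.1 * 30 + 0.9 * 20 mentioned above.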
def on_disconnected(self) -> None:
with suppress(AttributeError):
self.rpm_check_repeated_thread.cancel()
with suppress(AttributeError):
self.stop_stirring()
self.pwm.cleanup()
with suppress(AttributeError):
if self.rpm_calculator:
self.rpm_calculator.cleanup()
def start_stirring(self) -> None:
self.pwm.start(100) # get momentum to start
sleep(0.10)
self.set_duty_cycle(self.duty_cycle)
sleep(0.75)
self.rpm_check_repeated_thread.start() # .start is idempotent
def poll(self, poll_for_seconds: float) -> Optional[float]:
"""
Returns an RPM, or None if not measuring RPM.
"""
if self.rpm_calculator is None:
return None
recent_rpm = self.rpm_calculator(poll_for_seconds)
if recent_rpm == 0:
# TODO: attempt to restart stirring
self.logger.warning("Stirring RPM is 0 - has it failed?")
self.blink_error_code(error_codes.STIRRING_FAILED_ERROR_CODE)
if self._measured_rpm is not None:
# use a simple EMA, alpha chosen arbitrarily, but should be a function of delta time.
self._measured_rpm = 0.1 * self._measured_rpm + 0.90 * recent_rpm
else:
self._measured_rpm = recent_rpm
self.measured_rpm = structs.MeasuredRPM(
timestamp=current_utc_timestamp(), measured_rpm=self._measured_rpm
)
return self._measured_rpm
def poll_and_update_dc(self, poll_for_seconds: float) -> None:
self.poll(poll_for_seconds)
if self._measured_rpm is None:
return
result = self.pid.update(self._measured_rpm)
self.set_duty_cycle(self.duty_cycle + result)
def stop_stirring(self) -> None:
# if the user unpauses, we want to go back to their previous value, and not the default.
self.set_duty_cycle(0)
def on_ready_to_sleeping(self) -> None:
self.rpm_check_repeated_thread.pause()
self.stop_stirring()
def on_sleeping_to_ready(self) -> None:
self.duty_cycle = self._previous_duty_cycle
self.rpm_check_repeated_thread.unpause()
self.start_stirring()
def set_duty_cycle(self, value: float) -> None:
self._previous_duty_cycle = self.duty_cycle
self.duty_cycle = clamp(0, round(value, 5), 100)
self.pwm.change_duty_cycle(self.duty_cycle)
def set_target_rpm(self, value: float) -> None:
self.target_rpm = value
self.set_duty_cycle(self.rpm_to_dc_lookup(self.target_rpm))
self.pid.set_setpoint(self.target_rpm)
def block_until_rpm_is_close_to_target(self, abs_tolerance: float = 15) ->
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple seq2seq model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from models import attention_utils
from regularization import variational_dropout
from six.moves import xrange
FLAGS = tf.app.flags.FLAGS
def transform_input_with_is_missing_token(inputs, targets_present):
"""Transforms the inputs to have missing tokens when it's masked out. The
mask is for the targets, so therefore, to determine if an input at time t is
masked, we have to check if the target at time t - 1 is masked out.
e.g.
inputs = [a, b, c, d]
targets = [b, c, d, e]
targets_present = [1, 0, 1, 0]
which computes,
inputs_present = [1, 1, 0, 1]
and outputs,
transformed_input = [a, b, <missing>, d]
Args:
inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens
up to, but not including, vocab_size.
targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with
True representing the presence of the word.
Returns:
transformed_input: tf.int32 Tensor of shape [batch_size, sequence_length]
which takes on value of inputs when the input is present and takes on
value=vocab_size to indicate a missing token.
"""
# To fill in if the input is missing.
input_missing = tf.constant(
FLAGS.vocab_size,
dtype=tf.int32,
shape=[FLAGS.batch_size, FLAGS.sequence_length])
# The 0th input will always be present to MaskGAN.
zeroth_input_present = tf.constant(True, tf.bool, shape=[FLAGS.batch_size, 1])
# Input present mask.
inputs_present = tf.concat(
[zeroth_input_present, targets_present[:, :-1]], axis=1)
transformed_input = tf.where(inputs_present, inputs, input_missing)
return transformed_input
# TODO(adai): IMDB labels placeholder to encoder.
def gen_encoder(hparams, inputs, targets_present, is_training, reuse=None):
"""Define the Encoder graph.
Args:
hparams: Hyperparameters for the MaskGAN.
inputs: tf.int32 Tensor of shape [batch_size, sequence_length] with tokens
up to, but not including, vocab_size.
targets_present: tf.bool Tensor of shape [batch_size, sequence_length] with
True representing the presence of the target.
is_training: Boolean indicating operational mode (train/inference).
reuse (Optional): Whether to reuse the variables.
Returns:
Tuple of (hidden_states, final_state).
"""
# We will use the same variable from the decoder.
if FLAGS.seq2seq_share_embedding:
with tf.variable_scope('decoder/rnn'):
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
with tf.variable_scope('encoder', reuse=reuse):
def lstm_cell():
return tf.contrib.rnn.BasicLSTMCell(
hparams.gen_rnn_size,
forget_bias=0.0,
state_is_tuple=True,
reuse=reuse)
attn_cell = lstm_cell
if is_training and hparams.gen_vd_keep_prob < 1:
def attn_cell():
return variational_dropout.VariationalDropoutWrapper(
lstm_cell(), FLAGS.batch_size, hparams.gen_rnn_size,
hparams.gen_vd_keep_prob, hparams.gen_vd_keep_prob)
cell = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(hparams.gen_num_layers)],
state_is_tuple=True)
initial_state = cell.zero_state(FLAGS.batch_size, tf.float32)
# Add a missing token for inputs not present.
real_inputs = inputs
masked_inputs = transform_input_with_is_missing_token(
inputs, targets_present)
with tf.variable_scope('rnn') as scope:
hidden_states = []
# Split the embedding into two parts so that we can load the PTB
# weights into one part of the Variable.
if not FLAGS.seq2seq_share_embedding:
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
missing_embedding = tf.get_variable('missing_embedding',
[1, hparams.gen_rnn_size])
embedding = tf.concat([embedding, missing_embedding], axis=0)
# TODO(adai): Perhaps append IMDB labels placeholder to input at
# each time point.
real_rnn_inputs = tf.nn.embedding_lookup(embedding, real_inputs)
masked_rnn_inputs = tf.nn.embedding_lookup(embedding, masked_inputs)
state = initial_state
def make_mask(keep_prob, units):
random_tensor = keep_prob
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
random_tensor += tf.random_uniform(
tf.stack([FLAGS.batch_size, 1, units]))
return tf.floor(random_tensor) / keep_prob
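# How make_mask works: keep_prob + U[0, 1) is >= 1.0 with probability keep_prob, so tf.floor yields
# a Bernoulli(keep_prob) 0/1 mask; dividing by keep_prob rescales the kept units so the expected
# activation is unchanged (inverted dropout). Because the mask has shape [batch, 1, units], the same
# mask is broadcast across all timesteps, which is what makes the dropout "variational".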
if is_training:
output_mask = make_mask(hparams.gen_vd_keep_prob, hparams.gen_rnn_size)
hidden_states, state = tf.nn.dynamic_rnn(
cell, masked_rnn_inputs, initial_state=state, scope=scope)
if is_training:
hidden_states *= output_mask
final_masked_state = state
# Produce the RNN state had the model operated only
# over real data.
real_state = initial_state
_, real_state = tf.nn.dynamic_rnn(
cell, real_rnn_inputs, initial_state=real_state, scope=scope)
final_state = real_state
return (hidden_states, final_masked_state), initial_state, final_state
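# Hedged usage sketch (hyperparameter values below are assumptions, not from this file); the hparams
# object only needs the fields referenced above:
#   hparams = tf.contrib.training.HParams(gen_rnn_size=650, gen_num_layers=2, gen_vd_keep_prob=0.5)
#   (hidden_states, final_masked_state), initial_state, final_state = gen_encoder(
#       hparams, inputs, targets_present, is_training=True)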
# TODO(adai): IMDB labels placeholder to encoder.
def gen_encoder_cnn(hparams, inputs, targets_present, is_training, reuse=None):
"""Define the CNN Encoder graph."""
del reuse
sequence = transform_input_with_is_missing_token(inputs, targets_present)
# TODO(liamfedus): Make this a hyperparameter.
dis_filter_sizes = [3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
# Keeping track of l2 regularization loss (optional)
# l2_loss = tf.constant(0.0)
with tf.variable_scope('encoder', reuse=True):
with tf.variable_scope('rnn'):
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
cnn_inputs = tf.nn.embedding_lookup(embedding, sequence)
# Create a convolution layer for each filter size
conv_outputs = []
for filter_size in dis_filter_sizes:
with tf.variable_scope('conv-%s' % filter_size):
# Convolution Layer
filter_shape = [
filter_size, hparams.gen_rnn_size, hparams.dis_num_filters
]
W = tf.get_variable(
name='W', initializer=tf.truncated_normal(filter_shape, stddev=0.1))
b = tf.get_variable(
name='b',
initializer=tf.constant(0.1, shape=[hparams.dis_num_filters]))
conv = tf.nn.conv1d(cnn_inputs, W, stride=1, padding='SAME', name='conv')
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
conv_outputs.append(h)
# Combine all the pooled features
dis_num_filters_total = hparams.dis_num_filters * len(dis_filter_sizes)
h_conv = tf.concat(conv_outputs, axis=2)
h_conv_flat = tf.reshape(h_conv, [-1, dis_num_filters_total])
# Add dropout
if is_training:
with tf.variable_scope('dropout'):
h_conv_flat = tf.nn.dropout(h_conv_flat, hparams.gen_vd_keep_prob)
# Final (unnormalized) scores and predictions
with tf.variable_scope('output'):
W = tf.get_variable(
'W',
shape=[dis_num_filters_total, hparams.gen_rnn_size],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(
name='b', initializer=tf.constant(0.1, shape=[hparams.gen_rnn_size]))
# l2_loss += tf.nn.l2_loss(W)
# l2_loss += tf.nn.l2_loss(b)
predictions = tf.nn.xw_plus_b(h_conv_flat, W, b, name='predictions')
predictions = tf.reshape(
predictions,
shape=[FLAGS.batch_size, FLAGS.sequence_length, hparams.gen_rnn_size])
final_state = tf.reduce_mean(predictions, 1)
return predictions, (final_state, final_state)
# TODO(adai): IMDB labels placeholder to decoder.
def gen_decoder(hparams,
inputs,
targets,
targets_present,
encoding_state,
is_training,
is_validating,
reuse=None):
"""Define the Decoder graph. The Decoder will now impute tokens that
have been masked from the input sequence.
"""
gen_decoder_rnn_size = hparams.gen_rnn_size
targets = tf.Print(targets, [targets], message='targets', summarize=50)
if FLAGS.seq2seq_share_embedding:
with tf.variable_scope('decoder/rnn', reuse=True):
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
with tf.variable_scope('decoder', reuse=reuse):
def lstm_cell():
return tf.contrib.rnn.BasicLSTMCell(
gen_decoder_rnn_size,
forget_bias=0.0,
state_is_tuple=True,
reuse=reuse)
attn_cell = lstm_cell
if is_training and hparams.gen_vd_keep_prob < 1:
def attn_cell():
return variational_dropout.VariationalDropoutWrapper(
lstm_cell(), FLAGS.batch_size, hparams.gen_rnn_size,
hparams.gen_vd_keep_prob, hparams.gen_vd_keep_prob)
cell_gen = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(hparams.gen_num_layers)],
state_is_tuple=True)
# Hidden encoder states.
hidden_vector_encodings = encoding_state[0]
# Carry forward the final state tuple from the encoder.
# State tuples.
state_gen = encoding_state[1]
if FLAGS.attention_option is not None:
(attention_keys, attention_values, _,
attention_construct_fn) = attention_utils.prepare_attention(
hidden_vector_encodings,
FLAGS.attention_option,
num_units=gen_decoder_rnn_size,
reuse=reuse)
def make_mask(keep_prob, units):
random_tensor = keep_prob
# 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
random_tensor += tf.random_uniform(tf.stack([FLAGS.batch_size, units]))
return tf.floor(random_tensor) / keep_prob
if is_training:
output_mask = make_mask(hparams.gen_vd_keep_prob, hparams.gen_rnn_size)
with tf.variable_scope('rnn'):
sequence, logits, log_probs = [], [], []
if not FLAGS.seq2seq_share_embedding:
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
softmax_w = tf.matrix_transpose(embedding)
softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
rnn_inputs = tf.nn.embedding_lookup(embedding, inputs)
# TODO(adai): Perhaps append IMDB labels placeholder to input at
# each time point.
rnn_outs = []
fake = None
for t in xrange(FLAGS.sequence_length):
if t > 0:
tf.get_variable_scope().reuse_variables()
# Input to the Decoder.
if t == 0:
# Always provide the real input at t = 0.
rnn_inp = rnn_inputs[:, t]
# If the input is present, read in the input at t.
# If the input is not present, read in the previously generated.
else:
real_rnn_inp = rnn_inputs[:, t]
# While validating, the decoder should be operating in teacher
# forcing regime. Also, if we're just training with cross_entropy
# use teacher forcing.
if is_validating or FLAGS.gen_training_strategy == 'cross_entropy':
rnn_inp = real_rnn_inp
else:
fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake)
rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp,
fake_rnn_inp)
# RNN.
rnn_out, state_gen = cell_gen(rnn_inp, state_gen)
if FLAGS.attention_option is not None:
rnn_out = attention_construct_fn(rnn_out, attention_keys,
attention_values)
if is_training:
rnn_out *= output_mask
rnn_outs.append(rnn_out)
if FLAGS.gen_training_strategy != 'cross_entropy':
logit = tf.nn.bias_add(tf.matmul(rnn_out, softmax_w), softmax_b)
# Output for Decoder.
# If input is present: Return real at t+1.
# If input is not present: Return fake for t+1.
real = targets[:, t]
categorical = tf.contrib.distributions.Categorical(logits=logit)
if FLAGS.use_gen_mode:
fake = categorical.mode()
else:
fake = categorical.sample()
log_prob = categorical.log_prob(fake)
output = tf.where(targets_present[:, t], real, fake)
else:
real = targets[:, t]
logit = tf.zeros(tf.stack([FLAGS.batch_size, FLAGS.vocab_size]))
log_prob = tf.zeros(tf.stack([FLAGS.batch_size]))
output = real
# Add to lists.
sequence.append(output)
log_probs.append(log_prob)
logits.append(logit)
if FLAGS.gen_training_strategy == 'cross_entropy':
logits = tf.nn.bias_add(
tf.matmul(
tf.reshape(tf.stack(rnn_outs, 1), [-1, gen_decoder_rnn_size]),
softmax_w), softmax_b)
logits = tf.reshape(logits,
[-1, FLAGS.sequence_length, FLAGS.vocab_size])
else:
logits = tf.stack(logits, axis=1)
return (tf.stack(sequence, axis=1), logits, tf.stack(log_probs, axis=1))
def dis_encoder(hparams, masked_inputs, is_training, reuse=None,
embedding=None):
"""Define the Discriminator encoder. Reads in the masked inputs for context
and produces the
# zemcy/support_lib.py
'''
window = (left, top, w, h)
box = ((center_x, center_y), (a, b), angle)  # a.k.a. rect, e.g. ((0.0, 0.0), (0.0, 0.0), 0.0)
(-angle) is the angle between Ox and the vector along side a, pointing from the center to the right
! points is an np.array
! points is a contour
box <-> points -> window <-> two points:
cnt/points -> box: new_box = cv.minAreaRect(points), with points as an np.array
box -> points: points = cv.boxPoints(box); points = np.int0(points)
points -> window: cv.boundingRect(points), with points as an np.array
box -> window: box -> points -> window
window -> two points: two_points = window_to_two_points(window)
two points -> window: window = two_points_to_window(two_points)
boxs = contours_to_boxs(contours)
rotate:
box -> points: convert_points_by_rotation_matrix(points, matrix)
window -> points
draw:
points: cv.polylines(img, points, True, (0, 255, 255))
        cv.drawContours(draw_resized_img, rect_contours, -1, (0, 255, 0), 1)
box: box -> points
windows: draw_windows(img, windows, color, thickness)
numpy:
'''
# Python 2/3 compatibility
from __future__ import print_function, division
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import os
from glob import glob
import math
import numpy as np
import cv2 as cv
try:
import statistics
import imutils
from matplotlib import pyplot as plt
from PIL import Image,ImageFont, ImageDraw
except:
pass
import random
from numpy import (array, dot, arccos, clip)
from numpy.linalg import norm
DEBUG = False
VISION_SIZE = '480x270'
STANDARD_SIZE = '960x540'
STANDARD_AREA = 960*540
STANDARD_AREA_DICT = {'1': 1024*576, '2': 960*540, '3': 640*360}
global global_colors
global_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255), (255, 0, 255)]
# global_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
global global_color_index
global_color_index = 0
def points_to_box(points):
return cv.minAreaRect(points)
def box_to_points(box):
points = cv.boxPoints(box)
points = np.int0(points)
return points
def points_to_window(points):
return cv.boundingRect(points)
def box_to_window(box):
points = box_to_points(box)
return points_to_window(points)
def window_to_two_points(window):
left, top, w, h = window
topleft = [left, top]
bottomright = [left + w, top +h]
return [topleft, bottomright]
def window_to_unit(window):
left, top, w, h = window
unit = (_center_x, _center_y), (_w, _h) = (left + w//2, top + h//2), (w, h)
return unit
def two_points_to_window(points):
topleft, bottomright = points
left, top = topleft
w, h = bottomright[0] - left, bottomright[1] - top
return (left, top, w, h)
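# Example of the conversion chain documented in the module docstring (illustrative values):
#   points = np.array([[0, 0], [10, 0], [10, 4], [0, 4]], dtype=np.int32)
#   box = points_to_box(points)            # ((cx, cy), (a, b), angle) via cv.minAreaRect
#   corners = box_to_points(box)           # 4x2 integer corner array via cv.boxPoints
#   window = points_to_window(corners)     # (left, top, w, h) via cv.boundingRect
#   topleft, bottomright = window_to_two_points(window)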
def add_padding_to_img(img, padding):
width, height = _resolution = get_resolution(img)
padding_width, padding_height = width + 2*padding, height + 2*padding
padding_img = create_img((padding_width, padding_height))
padding_img[padding: height + padding, padding: width + padding] = img
return padding_img
def boxs_to_imgs_with_padding(img, boxs, is_w_bigger_than_h = True, padding = 5):
padding_img = add_padding_to_img(img, padding)
padding_box_imgs = []
for box in boxs:
(center_x, center_y), (a, b), angle = box
padding_box = (center_x + padding, center_y + padding), (a + 2*padding, b+ 2*padding), angle
padding_box_img = box_to_img(padding_img, padding_box, is_w_bigger_than_h)
padding_box_imgs.append(padding_box_img)
return padding_box_imgs
def box_to_img(img, box, is_w_bigger_than_h = True):
img_width, img_height = get_resolution(img)
(center_x, center_y), (a, b), angle = box
(new_w, new_h) = (int(a), int(b)) if a>b else (int(b), int(a))
bounding_window = box_to_window(box)
_left, _top, w, h = bounding_window
simple_cut_img_w, simple_cut_img_h = max(new_w, w), max(new_h, h)
padding = - int(min(center_x - simple_cut_img_w, img_width - center_x -
simple_cut_img_w, center_y - simple_cut_img_h,
img_height - center_y - simple_cut_img_h, 0))
if padding != 0:
simple_cut_window = int(center_x - simple_cut_img_w/2) + padding, int(center_y - simple_cut_img_h/2) + padding, simple_cut_img_w, simple_cut_img_h
padding_img = add_padding_to_img(img, padding)
cut_window_img = cut_window(padding_img, simple_cut_window)
else:
simple_cut_window = int(center_x - simple_cut_img_w/2), int(center_y - simple_cut_img_h/2), simple_cut_img_w, simple_cut_img_h
cut_window_img = cut_window(img, simple_cut_window)
rotate_angle = abs(abs(angle +45)-45) if angle < -45 or angle > 0 else -abs(abs(angle +45)-45)
if False:
print('box = ', box)
print('boungding_window = ', bounding_window)
print('simple_cut_window = ', simple_cut_window)
if (is_w_bigger_than_h and w < h) or (is_w_bigger_than_h is False and w > h):
consider90_rotate_angle = rotate_angle + 90
else:
consider90_rotate_angle = rotate_angle
if False:
print('consider90_rotate_angle = ', consider90_rotate_angle)
rotated_cut_window_img = rotate_bound(cut_window_img, -consider90_rotate_angle)
rotated_img_w, rotated_img_h = get_resolution(rotated_cut_window_img)
new_center_x, new_center_y = rotated_img_w//2, rotated_img_h//2
if False:
print('new_center_x, new_center_y = ', new_center_x, new_center_y)
if False:
cv.imshow('rotated_cut_window_img', rotated_cut_window_img)
cv.waitKey(0)
rotated_window = int(new_center_x - new_w//2), int(new_center_y -new_h//2), int(new_w), int(new_h)
cut_box_img = cut_window(rotated_cut_window_img, rotated_window)
if False:
cv.imshow('cut_box_img', cut_box_img)
cv.waitKey(0)
return cut_box_img
#
def contours_to_boxs(contours):
boxs = [points_to_box(contour) for contour in contours]
return boxs
def boxs_to_points_array(boxs):
return [box_to_points(box) for box in boxs]
def boxs_to_imgs(img, boxs, is_w_bigger_than_h = True):
box_imgs = [box_to_img(img, box, is_w_bigger_than_h) for box in boxs]
return box_imgs
def convert_coord_for_point(o_point, point):
o_x, o_y = o_point
x, y = point
new_point = o_x + x, o_y +y
return new_point
def convert_minus_h_window(minus_h_window):
left, top, w, h = minus_h_window
window = left, top + h , w, -h
return window
def convert_minus_h_windows(minus_h_windows):
windows = []
for minus_h_window in minus_h_windows:
window = convert_minus_h_window(minus_h_window)
windows.append(window)
return windows
def convert_coord_for_window(o_point, window):
o_x, o_y = o_point
left, top, w, h = window
new_window = o_x + left, o_y + top, w, h
return new_window
def add_padding_window(resolution, window, width_padding_rate, height_padding_rate):
left, top, w, h = window
bottomright_x, bottomright_y = left + w, top + h
width, height = resolution
padding_width, padding_height = int(w*width_padding_rate), int(h*height_padding_rate)
delta_w, delta_h = padding_width - w, padding_height - h
if left - delta_w//2 < 0:
left = 0
else:
left -= delta_w//2
if bottomright_x + delta_w//2 > width:
bottomright_x = width
else:
bottomright_x += delta_w//2
if top - delta_h//2 < 0:
top = 0
else:
top -= delta_h//2
if bottomright_y + delta_h//2 > height:
bottomright_y = height
else:
bottomright_y += delta_h//2
new_window = left, top, bottomright_x - left, bottomright_y - top
return new_window
# draw
def draw_windows(img, windows, color=(0, 255, 0), thickness=1):
for window in windows:
bx, by, bw, bh = window
cv.rectangle(img, (bx, by), (bx+bw, by+bh), color, thickness)
def draw_points_array(img, points_array, color = (0,255,255), thickness = 3):
cv.drawContours(img, points_array, -1, color, thickness )
# cv.polylines(img,points_array,True, color)
def get_new_color():
global global_color_index
global global_colors
color_number = len(global_colors)
color = global_colors[global_color_index%color_number]
global_color_index += 1
return color
def draw_boxs(img, boxs, color = (0,255,255), thickness = 3):
for box in boxs:
if color == -1:
actual_color = get_new_color()
else:
actual_color = color
points = box_to_points(box)
draw_points_array(img, [points], actual_color, thickness)
def draw_points(img, points, color = (0,0,255), radius=2, thickness=-1):
for point in points:
cv.circle(img , point, radius, color, thickness)
def draw_np_where_points(img, y_x_array, color = (0,0,255)):
y_array, _x_array = y_x_array
if len(y_array) == 0:
print('len(y_array) = ', len(y_array))
return
points = np_where_to_points(y_x_array)
if False:
print('points = ', points)
draw_points(img, points)
def draw_information(frame, showing_window_name=None, recognitions = None, infor_dict=None, window_color=(0, 255, 0), thickness=1, full_screen=False, lang='eng'):
standard_resolution = 960, 540
infor_window = (20, 50, 200, 200)
resolution = _frame_w, frame_h = get_resolution(frame)
infor_l, infor_t, infor_w, infor_h = infor_window = convert_windows([infor_window], standard_resolution, resolution)[0]
if recognitions:
for recognition in recognitions:
draw_windows(frame, [recognition.window], color=recognition.color, thickness=recognition.thickness)
l, t, _w, _h = recognition.window
if lang == 'vie':
draw_text(frame, recognition.strg, recognition.window, color=(0, 0, 255))
elif lang == 'eng':
cv.putText(frame, recognition.strg, (l, t - 5), cv.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 2)
else:
print('Language to draw is not accepted!')
exit()
if infor_dict:
n_infor = len(infor_dict)
distance = min(infor_h/n_infor, frame_h/10)
for i, (key, value) in enumerate(infor_dict.items()):
item_t = infor_t + int(i*distance)
cv.putText(frame, str(key) + ': ' + str(value), (infor_l, item_t), cv.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
if showing_window_name is None:
return
elif full_screen == True:
cv.namedWindow(showing_window_name, cv.WND_PROP_FULLSCREEN)
cv.setWindowProperty(showing_window_name,cv.WND_PROP_FULLSCREEN,cv.WINDOW_FULLSCREEN)
cv.imshow(showing_window_name, frame)
def draw_text(img, text, window, fontpath="vni-full-standard/font-times-new-roman.ttf", color=(0, 255, 0)):
n_line = len(text.split('\n'))
l, t, w, h = window
font_size = int(12*h/(n_line*10))
font = ImageFont.truetype(fontpath, font_size)
pil_img = Image.fromarray(img)
draw = ImageDraw.Draw(pil_img)
b,g,r,a = color[0], color[1], color[2], 0
draw.text((l, t), text, font = font, fill = (b, g, r, a))
img[...] = np.array(pil_img)
# video
video_extentions = ['mp4', 'avi']
img_extentions = ['jpeg', 'jpg', 'png', 'JPG', 'PNG', 'JPEG']
def is_video_type(video_uri):
for video_extention in video_extentions:
if video_uri.endswith(video_extention):
return True
return False
def is_img_type(video_uri):
for img_extention in img_extentions:
if '.' + img_extention in video_uri:
return True
return False
def split_uri_parameters(uri):
source = str(uri).strip()
chunks = source.split(':')
params = dict( s.split('=') for s in chunks[1:] )
return params
def create_stream_video(video_uri, queueSize=8, fps=15):
try:
video_uri = int(video_uri)
except:
pass
uri_type = None
if isinstance(video_uri, int) or video_uri.startswith('rtsp'):
uri_type = 'camera'
elif video_uri.startswith('screen'):
params = split_uri_parameters(video_uri)
resolution = size_to_resolution(params['size'])
import pyautogui
class ScreenCapture:
def __init__(self):
pass
def read(self):
pil_img = pyautogui.screenshot()
img = np.array(pil_img)
img = cv.resize(img, resolution)
if 'window' in params:
window = text_to_window(params['window'])
img = cut_window(img, window)
return True, img
screen_capture = ScreenCapture()
return screen_capture
if uri_type is None:
if is_video_type(video_uri):
uri_type = 'video'
if uri_type is None:
if is_img_type(video_uri):
uri_type = 'image'
if uri_type is None:
print('Video uri is not accepted!')
exit()
if uri_type == 'image':
import video
camera = video.create_capture(video_uri, None)
else:
import videostream
if uri_type == 'video':
camera = videostream.QueuedStream(video_uri, fps=fps).start()
elif uri_type == 'camera':
camera = videostream.QueuedStream(video_uri, queueSize=queueSize).start()
else:
print('Something wrong when create camera!')
exit()
return camera
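# Hedged usage sketch (the URIs below are illustrative, not taken from this file):
#   cam = create_stream_video(0)                        # integer index -> camera QueuedStream
#   cam = create_stream_video('clip.mp4', fps=15)       # video file -> QueuedStream at a fixed fps
#   cam = create_stream_video('screen:size=960x540')    # screen capture via pyautogui
#   ok, frame = cam.read()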
# basic img
def get_img_area(img):
height, width, _channels = img.shape
return height*width
def get_center(image):
w, h = image.shape[:2]
return (w//2, h//2)
# def resize_img(img, cvt_area):
# width, height = _img_revolution = img.shape[1::-1]
# area = width*height
# ratio = math.sqrt(1.0*cvt_area/area)
# cvt_width = int(width*ratio)
# cvt_height = int(height*ratio)
# resize_shape = (cvt_width, cvt_height)
# resized_img = cv.resize(img, resize_shape)
# return resized_img
def cut_window(img, window):
if DEBUG:
print('window = ', window)
return img.copy()[window_to_slice(window)]
def get_resolution(img):
resolution = _weight, _height = img.shape[1::-1]
return resolution
def get_new_resolution(resolution, proposal_width=None, new_area=None):
w, h = resolution
if proposal_width:
new_width = min(proposal_width, w)
ratio = float(new_width) / w
elif new_area:
area = w*h
ratio = math.sqrt(1.0*new_area/area)
new_width = int(w*ratio)
else:
print("Use proposal_width or new_area as parameter!")
exit()
new_height = int(h*ratio)
new_resolution = new_width, new_height
return new_resolution
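# Example: get_new_resolution((1920, 1080), proposal_width=960) -> (960, 540), and
# get_new_resolution((1920, 1080), new_area=960 * 540) -> (960, 540).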
def resize_img(img, proposal_width=None, new_area=None):
resolution = get_resolution(img)
new_width, _ = new_resolution = get_new_resolution(resolution, proposal_width=proposal_width, new_area=new_area)
resized_img = imutils.resize(img, width=new_width)
return resized_img, new_resolution
def convert_windows(windows, source_resolution, des_resolution):
des_w, des_h = des_resolution
source_w, source_h = source_resolution
w_ratio, h_ratio = float(des_w) / source_w, float(des_h)/source_h
new_windows = []
for window in windows:
l, t, w, h = window
new_window = int(l*w_ratio), int(t*h_ratio), int(w*w_ratio), int(h*h_ratio)
new_windows.append(new_window)
return new_windows
def check_window_in_img(resolution, window):
img_w, img_h = resolution
l, t, w, h = window
r, b = l + w, t + h
for x in [l, r]:
if x < 0 or x > img_w:
return False
for y in [t, b]:
if y < 0 or y > img_h:
return False
return True
def get_size_area(size):
w, h = size
area = w*h
return area
def size_to_resolution(size):
return tuple(map(lambda element: int(element), size.split('x')))
def get_resize_ratio(size,stardard_area):
size_area = get_size_area(size)
ratio = math.sqrt(1.0*stardard_area/size_area)
return ratio
def get_resize_slice_step(size,stardard_area, min_step=None):
ratio = get_resize_ratio(size,stardard_area)
if min_step is not None:
step = max(int(1/ratio), min_step)
else:
step = int(1/ratio)
w_slice_step, h_slice_step = step, step
return w_slice_step, h_slice_step
from torch.utils.data import Dataset
import h5py
import numpy as np
import cv2
import torch
import math
from preprocessing.utils import incorporate_ratio
from preprocessing.utils import visualise_image
class DataFromH5py(Dataset):
def __init__(self, file_path, idx_sets, purpose, input_type, label_type, only_one_mask = False, other_sample_entries = ["future_centroid"],transform=None):
#Data and data manipulations
self.f = h5py.File(file_path, "r")
self.transform = transform
self.future_time = self.f['future_time'].value[0]
self.number_of_inputs = self.f['number_inputs'].value[0]
self.timestep = self.f['timestep'].value[0]
#Train / Test / Val splits
self.purpose = purpose #train /test /val
self.idx_sets = idx_sets
self.len = idx_sets[purpose].shape[0]
#Sample parameters
self.label_type = label_type
self.input_type = input_type
self.other_sample_entries = other_sample_entries
self.only_one_mask = only_one_mask
#Data general parameters
self.initial_dims = (self.f['datapoint1']['images'].shape[0], self.f['datapoint1']['images'].shape[1])
if(hasattr(transform, 'h' ) and hasattr(transform , 'w')):
self.resized_dims =(transform.h, transform.w)
def __len__(self):
return self.len
def __getitem__(self, idx):
datapoint_idx = self.idx_sets[self.purpose][idx]
frame = "datapoint{}".format(datapoint_idx)
#TODO: maybe use a different technique than reshaping
inputs_images = []
inputs_masks = []
for in_type in self.input_type:
inp = self.f[frame][in_type].value
if(len(inp.shape)==4):
for i in range(inp.shape[3]):
inputs_images.append(inp[:,:,:,i])
elif(len(inp.shape)==3):
#Length 3 inputs are masks, so convert them to integers and max them out
if(self.only_one_mask):
inputs_masks.append(inp[:,:,0].astype(int) * 255)
else:
inputs_masks.append(inp.astype(int)*255)
elif (len(inp.shape) == 2):
# Length 2 inputs are bboxes, so convert them to masks
for i in range(inp.shape[0]):
bbox_mask = np.zeros(self.initial_dims)
ymin, xmin, ymax, xmax = inp[i,:]
bbox_mask[ymin:ymax,xmin:xmax]=1
inputs_masks.append(bbox_mask)
if (self.only_one_mask):
break
else:
raise ValueError("Inputs can have 2, 3 or 4 dimentions")
inputs = inputs_images+inputs_masks
inputs = np.dstack(inputs)
if(self.label_type == 'future_bbox'):
label = np.zeros(self.initial_dims)
ymin, xmin, ymax, xmax = self.f[frame][self.label_type].value
label[ymin:ymax,xmin:xmax]=1
else:
label = self.f[frame][self.label_type].value
sample = {'input': inputs.astype(np.float), 'label': label.astype(np.float)}
for key in self.other_sample_entries:
sample[key] = self.f[frame][key].value
if self.transform:
sample = self.transform(sample)
return sample
def get_raw(self, idx):
datapoint_idx = self.idx_sets[self.purpose][idx]
frame = "datapoint{}".format(datapoint_idx)
inputs_images = []
inputs_masks = []
for in_type in self.input_type:
inp = self.f[frame][in_type].value
if(len(inp.shape)==4):
for i in range(inp.shape[3]):
inputs_images.append(inp[:,:,:,i])
elif(len(inp.shape)==3):
#Length 3 inputs are masks, so convert them to integers and max them out
if(self.only_one_mask):
inputs_masks.append(inp[:,:,0].astype(int) * 255)
else:
for i in range(inp.shape[2]):
inputs_masks.append(inp[:,:,i].astype(int)*255)
elif (len(inp.shape) == 2):
# Length 2 inputs are bboxes, so convert them to masks
for i in range(inp.shape[0]):
bbox_mask = np.zeros(self.initial_dims)
ymin, xmin, ymax, xmax = inp[i,:]
bbox_mask[ymin:ymax,xmin:xmax]=1
inputs_masks.append(bbox_mask)
if (self.only_one_mask):
break
else:
raise ValueError("Inputs can have 2, 3 or 4 dimentions")
inputs = inputs_images+inputs_masks
if(self.label_type == 'future_bbox'):
label = np.zeros(self.initial_dims)
ymin, xmin, ymax, xmax = self.f[frame][self.label_type].value
label[ymin:ymax,xmin:xmax]=1
else:
label = self.f[frame][self.label_type].value
sample = {'input': inputs, 'label': label, 'input_images': inputs_images, 'input_masks': inputs_masks}
for key in self.other_sample_entries:
sample[key] = self.f[frame][key].value
return sample
def get_datapoint_index(self,idx):
datapoint_idx = self.idx_sets[self.purpose][idx]
return datapoint_idx
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
new_sample = sample
input = sample['input']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
input = input.transpose((2, 0, 1))
new_sample['input'] = torch.from_numpy(input / 255)
new_sample['label'] = torch.from_numpy(sample['label'])
return new_sample
class ResizeSample(object):
def __init__(self, height = 128, width = 256, label_height = 'same', label_width = 'same'):
self.h = int(height)
self.w = int(width)
if(label_height =='same'):
self.l_h = height
else:
self.l_h = label_height
if(label_width =='same'):
self.l_w =width
else:
self.l_w = label_width
def __call__(self,sample):
input, label = sample['input'], sample['label']
new_sample = sample
input = cv2.resize(input, (self.w, self.h))
label = cv2.resize(label, (self.l_w, self.l_h))
new_sample['input'] = input
new_sample['label'] = (label > 0.5).astype(np.float)
return new_sample
class RandomCropWithAspectRatio(object):
def __init__(self, max_crop = 10):
self.max_crop = max_crop
def __call__(self, sample):
input, label = sample['input'], sample['label']
new_sample = sample
initial_h, initial_w = input.shape[0], input.shape[1]
crop_amount = np.random.randint(self.max_crop)
new_w = initial_w - crop_amount
ratio = new_w/initial_w
new_h = int(round(ratio*initial_h))
min_h = initial_h - new_h
min_w = initial_w - new_w
crop_type = np.random.randint(3)
if(crop_type == 0):
input = input[min_h:new_h,min_w:new_w,:]
label = label[min_h:new_h, min_w:new_w]
elif(crop_type == 1):
input = input[:new_h, :new_w, :]
label = label[:new_h, :new_w]
elif(crop_type == 2):
input = input[min_h:, min_w:, :]
label = label[min_h:, min_w:]
new_sample['input'] = input
new_sample['label'] = label
return new_sample
class RandomHorizontalFlip(object):
def __init__(self, chance = 0.5):
self.chance = chance
def __call__(self, sample):
input, label = sample['input'], sample['label']
new_sample = sample
do_it = np.random.uniform()
if (do_it >= self.chance):
return sample
input = cv2.flip(input, 1)
label = cv2.flip(label, 1)
new_sample['input'] = input
new_sample['label'] = label
return new_sample
class RandomRotation(object):
def __init__(self, rotation_range = 2):
self.rotation_range = rotation_range
def __call__(self,sample):
input, label = sample['input'], sample['label']
new_sample = sample
rows, cols = input.shape[0], input.shape[1]
rotation_angle = np.random.uniform(-self.rotation_range, self.rotation_range)
M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotation_angle , 1)
input = cv2.warpAffine(input, M, (cols, rows))
label = cv2.warpAffine(label, M, (cols, rows))
new_sample['input'] = input
new_sample['label'] = label
return new_sample
class RandomNoise(object):
def __init__(self, noise_range = 50, chance = 0.3):
self.noise_range = noise_range
self.chance = chance
def __call__(self,sample):
input, label = sample['input'], sample['label']
new_sample = sample
rows, cols = input.shape[0], input.shape[1]
do_it = np.random.uniform()
if(do_it >= self.chance):
return sample
for inp_idx in range(input.shape[-1]):
if(np.unique(input[:,:, inp_idx]).shape[0]==2):
continue
rand_array = np.random.randint(-self.noise_range,self.noise_range,(rows,cols))
input[:,:,inp_idx]+=rand_array
input[:, :, inp_idx] = np.minimum(input[:,:,inp_idx], 255)
input[:, :, inp_idx] = np.maximum(input[:,:,inp_idx], 0)
new_sample['input'] = input
return new_sample
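# Hedged usage sketch (file path and index arrays are illustrative assumptions; the key names
# 'images', 'future_bbox' and 'future_centroid' do appear in the code above):
#   from torchvision import transforms
#   idx_sets = {'train': np.arange(0, 800), 'val': np.arange(800, 900), 'test': np.arange(900, 1000)}
#   tsfm = transforms.Compose([RandomHorizontalFlip(), RandomRotation(), ResizeSample(128, 256), ToTensor()])
#   dataset = DataFromH5py('dataset.h5', idx_sets, purpose='train',
#                          input_type=['images'], label_type='future_bbox', transform=tsfm)
#   sample = dataset[0]   # dict with 'input' (C x H x W tensor), 'label', and 'future_centroid'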
############## DOUBLE DATA ###############################
class DataFromDoubleH5py(Dataset):
def __init__(self, file_path0,file_path1, idx_sets0,idx_sets1, purpose, input_type, label_type, only_one_mask = False, other_sample_entries = ["future_centroid"],transform=None):
#Data and data manipulations
self.f0 = h5py.File(file_path0, "r")
self.f1 = h5py.File(file_path1,"r")
self.transform = transform
self.future_time0 = self.f0['future_time'].value[0]
self.number_of_inputs0 = self.f0['number_inputs'].value[0]
self.timestep0 = self.f0['timestep'].value[0]
self.future_time1 = self.f1['future_time'].value[0]
self.number_of_inputs1 = self.f1['number_inputs'].value[0]
self.timestep1 = self.f1['timestep'].value[0]
self.number_of_inputs = self.number_of_inputs0
self.timestep = self.timestep0
#Train / Test / Val splits
self.purpose = purpose #train /test /val
self.idx_sets0 = idx_sets0
self.idx_sets1 = idx_sets1
self.len0 = idx_sets0[purpose].shape[0]
self.len1 = idx_sets1[purpose].shape[0]
#Sample parameters
self.label_type = label_type
self.input_type = input_type
self.other_sample_entries = other_sample_entries
self.only_one_mask = only_one_mask
#Data general parameters
self.initial_dims0 = (self.f0['datapoint1']['images'].shape[0], self.f0['datapoint1']['images'].shape[1])
self.initial_dims1 = (self.f1['datapoint1']['images'].shape[0], self.f1['datapoint1']['images'].shape[1])
self.initial_dims = self.initial_dims0
if(hasattr(transform, 'h' ) and hasattr(transform , 'w')):
self.resized_dims =(transform.h, transform.w)
def __len__(self):
return self.len0+self.len1 #potential bug
def __getitem__(self, idx):
dataset_0 = False
if(idx < self.len0):
dataset_0 = True
if(dataset_0):
datapoint_idx = self.idx_sets0[self.purpose][idx]
frame = "datapoint{}".format(datapoint_idx)
#TODO: maybe use a different technique than reshaping
inputs_images = []
inputs_masks = []
for in_type in self.input_type:
inp = self.f0[frame][in_type].value
if(len(inp.shape)==4):
for i in range(inp.shape[3]):
inputs_images.append(inp[:,:,:,i])
elif(len(inp.shape)==3):
#Length 3 inputs are masks, so convert them to integers and max them out
if(self.only_one_mask):
inputs_masks.append(inp[:,:,0].astype(int) * 255)
else:
inputs_masks.append(inp.astype(int)*255)
elif (len(inp.shape) == 2):
# Length 2 inputs are bboxes, so convert them to masks
for i in range(inp.shape[0]):
bbox_mask = np.zeros(self.initial_dims0)
ymin, xmin, ymax, xmax = inp[i,:]
bbox_mask[ymin:ymax,xmin:xmax]=1
inputs_masks.append(bbox_mask)
if (self.only_one_mask):
break
else:
raise ValueError("Inputs can have 2, 3 or 4 dimentions")
inputs = inputs_images+inputs_masks + [np.zeros(self.initial_dims0)]
inputs = np.dstack(inputs)
if(self.label_type == 'future_bbox'):
label = np.zeros(self.initial_dims0)
ymin, xmin, ymax, xmax = self.f0[frame][self.label_type].value
label[ymin:ymax,xmin:xmax]=1
else:
label = self.f0[frame][self.label_type].value
sample = {'input': inputs.astype(np.float), 'label': label.astype(np.float)}
for key in self.other_sample_entries:
sample[key] = self.f0[frame][key].value
if self.transform:
sample = self.transform(sample)
return sample
else:
datapoint_idx = self.idx_sets1[self.purpose][idx - self.len0]
frame = "datapoint{}".format(datapoint_idx)
# TODO: maybe use a different technique than reshaping
inputs_images = []
inputs_masks = []
for in_type in self.input_type:
inp = self.f1[frame][in_type].value
if (len(inp.shape) == 4):
for i in range(inp.shape[3]):
inputs_images.append(inp[:, :, :, i])
elif (len(inp.shape) == 3):
# Length 3 inputs are masks, so convert them to integers and max them out
if (self.only_one_mask):
inputs_masks.append(inp[:, :, 0].astype(int) * 255)
else:
inputs_masks.append(inp.astype(int) * 255)
elif (len(inp.shape) == 2):
# Length 2 inputs are bboxes, so convert them to masks
for i in range(inp.shape[0]):
bbox_mask = np.zeros(self.initial_dims1)
ymin, xmin, ymax, xmax = inp[i, :]
bbox_mask[ymin:ymax, xmin:xmax] = 1
inputs_masks.append(bbox_mask)
if (self.only_one_mask):
break
else:
raise ValueError("Inputs can have 2, 3 or 4 dimentions")
inputs = inputs_images + inputs_masks + [np.ones(self.initial_dims1)*255]
inputs = np.dstack(inputs)
if (self.label_type == 'future_bbox'):
label = np.zeros(self.initial_dims1)
ymin, xmin, ymax, xmax = self.f1[frame][self.label_type].value
label[ymin:ymax, xmin:xmax] = 1
else:
label = self.f1[frame][self.label_type].value
sample = {'input': inputs.astype(np.float), 'label': label.astype(np.float)}
for key in self.other_sample_entries:
sample[key] = self.f1[frame][key].value
if self.transform:
sample = self.transform(sample)
return sample
def get_raw(self, idx):
dataset_0 = False
if (idx < self.len0):
dataset_0 = True
if (dataset_0):
datapoint_idx = self.idx_sets0[self.purpose][idx]
frame = "datapoint{}".format(datapoint_idx)
inputs_images = []
inputs_masks = []
for in_type in self.input_type:
inp = self.f0[frame][in_type].value
if(len(inp.shape)==4):
for i in range(inp.shape[3]):
inputs_images.append(inp[:,:,:,i])
elif(len(inp.shape)==3):
#Length 3 inputs are masks, so convert them to integers and max them out
if(self.only_one_mask):
inputs_masks.append(inp[:,:,0].astype(int) * 255)
# Distributed under the Apache License, Version 2.0.
# See accompanying NOTICE file for details.
import numpy as np
import subprocess
# WGS84 constants
_a = 6378137
_f = 1/(298257223563/1000000000)
_e2 = _f*(2-_f)
_e2m = np.square(1-_f)
_e2a = abs(_e2)
_e4a = np.square(_e2)
epsilon = np.finfo(float).eps
_maxrad = 2 * _a / epsilon
def llh_to_enu(lat, lon, h, lat0, lon0, h0, in_degrees=True, pure_python=True):
"""Convert east, north, and up to latitude, longitude, and altitude.
East, north, and up are coordinates within a local level Cartesian
coordinate system with origin at geodetic latitude lat0, longitude lon0,
and height h0 above the WGS84 ellipsoid. Up is normal to the ellipsoid
surface and north is in the direction of the true north.
:param lat: Geodetic latitude of the point to convert.
:type lat: float
:param lon: Longitude of the point to convert.
:type lon: float
:param h: Height above WGS84 ellipsoid of the point to convert (meters).
:type h: float
:param lat0: Geodetic latitude of the local east/north/up coordinate
system.
:type lat: float
:param lon0: Longitude of the local east/north/up coordinate system.
:type lon: float
:param h0: Height above the WGS84 ellipsoid of the local east/north/up
coordinate system (meters).
:type height: float
:param in_degrees: Specify that all angles are in degrees.
:type in_degrees: bool
:param pure_python: Specify whether to use the pure-Python implementation
(faster) or the reference implementation from GeographicLib's command
line call to CartConvert.
:type pure_python: bool
:return: East, north, and up coordinates (meters) of the converted point.
:rtype: list
Example:
lat = 35.906437
lon = -79.056282
h = 123
lat0 = 35.905446
lon0 = -79.060788
h0 = 0
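A corresponding call (illustrative):
east, north, up = llh_to_enu(lat, lon, h, lat0, lon0, h0)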
"""
if not in_degrees:
lat = lat*180/np.pi
lon = lon*180/np.pi
lat0 = lat0*180/np.pi
lon0 = lon0*180/np.pi
if pure_python:
sphi, cphi = sincosd(lat0)
slam, clam = sincosd(lon0)
_r = geocentric_rotation(sphi, cphi, slam, clam)
xc,yc,zc = llh_to_ecef(lat, lon, h, in_degrees=True)
_x0,_y0,_z0 = llh_to_ecef(lat0, lon0, h0, in_degrees=True)
xc -= _x0; yc -= _y0; zc -= _z0;
x = _r[0] * xc + _r[3] * yc + _r[6] * zc;
y = _r[1] * xc + _r[4] * yc + _r[7] * zc;
z = _r[2] * xc + _r[5] * yc + _r[8] * zc;
return [x,y,z]
else:
output = subprocess.check_output(['CartConvert','-l',
str(lat0),str(lon0),
str(h0),'--input-string',
' '.join([str(lat),str(lon),str(h)])])
return [float(s) for s in output.split('\n')[0].split(' ')]
def enu_to_llh(east, north, up, lat0, lon0, h0, in_degrees=True,
pure_python=True):
"""Convert latitude, longitude, and height to east, north, up.
East, north, and up are coordinates within a local level Cartesian
coordinate system with origin at geodetic latitude lat0, longitude lon0,
and height h0 above the WGS84 ellipsoid. Up is normal to the ellipsoid
surface and north is in the direction of the true north.
:param east: Easting coordinate (meters) of the point to convert.
:type east: float
:param north: Northing coordinate (meters) of the point to convert.
:type north: float
:param up: Up coordinate (meters) of the point to convert.
:type up: float
:param lat0: Geodetic latitude of the local east/north/up coordinate
system.
:type lat: float
:param lon0: Longitude of the local east/north/up coordinate system.
:type lon: float
:param h0: Height above the WGS84 ellipsoid of the local east/north/up
coordinate system (meters).
:type height: float
:param in_degrees: Specify that all angles are in degrees.
:type in_degrees: bool
:param pure_python: Specify whether to use the pure-Python implementation
(faster) or the reference implementation from GeographicLib's command
line call to CartConvert.
:type pure_python: bool
:return: Geodetic latitude, longitude, and height (meters) above the WGS84
ellipsoid of the converted point.
:rtype: list
Example:
lat = 35.906437
lon = -79.056282
h = 123
lat0 = 35.905446
lon0 = -79.060788
h0 = 0
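A corresponding round-trip call (illustrative):
east, north, up = llh_to_enu(lat, lon, h, lat0, lon0, h0)
lat, lon, h = enu_to_llh(east, north, up, lat0, lon0, h0)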
"""
if not in_degrees:
lat0 = lat0*180/np.pi
lon0 = lon0*180/np.pi
if pure_python:
x, y, z = east, north, up
sphi, cphi = sincosd(lat0)
slam, clam = sincosd(lon0)
_r = geocentric_rotation(sphi, cphi, slam, clam)
_x0,_y0,_z0 = llh_to_ecef(lat0, lon0, h0, in_degrees=True)
xc = _x0 + _r[0] * x + _r[1] * y + _r[2] * z
yc = _y0 + _r[3] * x + _r[4] * y + _r[5] * z
zc = _z0 + _r[6] * x + _r[7] * y + _r[8] * z
lat, lon, h = ecef_to_llh(xc, yc, zc, in_degrees)
else:
output = subprocess.check_output(['CartConvert','-r','-l',str(lat0),
str(lon0),str(h0),'--input-string',
' '.join([str(east),str(north),
str(up)])])
lat, lon, h = [float(s) for s in output.split('\n')[0].split(' ')]
if not in_degrees:
lat = lat*180/np.pi
lon = lon*180/np.pi
return [lat,lon,h]
def ecef_to_llh(X, Y, Z, in_degrees=True):
"""
Ported from GeographicLib.
:param X: ECEF x coordinate (meters).
:type X: float
:param Y: ECEF y coordinate (meters).
:type Y: float
:param Z: ECEF z coordinate (meters).
:type Z: float
:param in_degrees: Specify that all angles are in degrees.
:type in_degrees: bool
:return: Geodetic latitude, longitude, and height (meters) above the WGS84
ellipsoid of the converted point.
:rtype: list
Example:
x = 5634247
y = 2050698
z = 2167698
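A corresponding call (illustrative):
lat, lon, h = ecef_to_llh(x, y, z)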
"""
R = np.hypot(X,Y)
if R == 0:
slam = 0
clam = 1
else:
slam = Y / R
clam = X / R
h = np.hypot(R,Z) # Distance to center of earth
if (h > _maxrad):
# We really far away (> 12 million light years) treat the earth as a
# point and h, above, is an acceptable approximation to the height.
# This avoids overflow, e.g., in the computation of disc below. It's
# possible that h has overflowed to inf but that's OK.
#
# Treat the case X, Y finite, but R overflows to +inf by scaling by 2.
R = np.hypot(X/2, Y/2)
if R == 0:
slam = 0
clam = 1
else:
slam = (Y/2) / R
clam = (X/2) / R
H = np.hypot(Z/2,R)
sphi = (Z/2) / H
cphi = R / H
elif _e4a == 0:
# Treat the spherical case. Dealing with underflow in the general case
# with _e2 = 0 is difficult. Origin maps to N pole same as with
# ellipsoid.
if h == 0:
H = np.hypot(1, R)
sphi = 1 / H
else:
H = np.hypot(Z, R)
sphi = Z / H
cphi = R / H
h -= _a
else:
# Treat prolate spheroids by swapping R and Z here and by switching
# the arguments to phi = atan2(...) at the end.
p = np.square(R / _a)
q = _e2m * np.square(Z / _a)
r = (p + q - _e4a) / 6
if _f < 0:
p,q = q,p
if not (_e4a * q == 0 and r <= 0):
# Avoid possible division by zero when r = 0 by multiplying
# equations for s and t by r^3 and r, resp.
S = _e4a * p * q / 4 # S = r^3 * s
r2 = np.square(r)
r3 = r * r2
disc = S * (2 * r3 + S)
u = r
if (disc >= 0):
T3 = S + r3
# Pick the sign on the sqrt to maximize abs(T3). This
# minimizes loss of precision due to cancellation. The result
# is unchanged because of the way the T is used in definition
# of u.
if T3 < 0: # T3 = (r * t)^3
T3 += -np.sqrt(disc)
else:
T3 += np.sqrt(disc)
# N.B. cbrt always returns the real root. cbrt(-8) = -2.
T = np.cbrt(T3) # T = r * t
# T can be zero but then r2 / T -> 0.
if T != 0:
u += T + (r2 / T)
else:
# T is complex, but the way u is defined the result is real.
ang = np.arctan2(np.sqrt(-disc), -(S + r3))
# There are three possible cube roots. We choose the root
# which avoids cancellation. Note that disc < 0 implies that
# r < 0.
u += 2 * r * np.cos(ang / 3)
v = np.sqrt(np.square(u) + _e4a * q) # guaranteed positive
            # Avoid loss of accuracy when u < 0.
# Repository: aitoralmeida/lus_stratification
import os
import numpy as np
import argparse
from sklearn import metrics
from random import shuffle, sample, seed
import tensorflow as tf
from tensorflow import keras
from tensorflow.random import set_seed
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input as preprocess_input_v1
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input as preprocess_input_v2
from tensorflow.keras.applications.efficientnet import *
#TODO add to args
TEST_SET_PATIENTS = ['04_','09_','18_','21_','27_','36_','38_','41_','51_','55_','59_','60_']
def create_sets(path, positive, negative, model_name, model_version, model, train_test_divide):
files_covid= os.listdir(path)
total_files = len(files_covid)
print ('Total files in disk:', total_files)
#randomize the files
shuffle(files_covid)
#find positive and negative files
print('*'*10)
    print('Separating positive and negative files...')
print('Positive token:', positive)
    print('Negative token:', negative)
positive_files = []
negative_files = []
for name in files_covid:
if negative in name:
negative_files.append(name)
elif positive in name:
positive_files.append(name)
total_positive = len(positive_files)
print ('Total positive files:', total_positive)
total_negative = len(negative_files)
print ('Total negative files:', total_negative)
#sanity check
print('>>>>>Sanity check...')
print ('Expected total files:', total_files)
print ('Total files positive+negative:', total_positive+total_negative)
#calculating splits
#train
total_train_pos = int(total_positive * train_test_divide)
total_train_neg = int(total_negative * train_test_divide)
print('*'*10)
print('Calculating splits...')
print('Training positive:', total_train_pos)
print('Training positive percentage:', float(total_train_pos/(total_train_pos+total_train_neg)))
print('Training negative:', total_train_neg)
print('Training negative percentage:', float(total_train_neg/(total_train_pos+total_train_neg)))
total_train = total_train_pos+total_train_neg
print('Training total:', total_train)
#val
test_pos = total_positive - total_train_pos
test_neg = total_negative - total_train_neg
test_total = test_pos + test_neg
print('Test positive:', test_pos)
print('Test positive percentage:', float(test_pos/test_total))
print('Test negative:', test_neg)
print('Test negative percentage:', float(test_neg/test_total))
print('Test total:', test_total)
#sanity check
print('>>>>>Sanity check...')
    print('Target divide percentage:', train_test_divide)
print('Train percentage', (float)(total_train/(total_train+test_total)))
print('Test percentage', (float)(test_total/(total_train+test_total)))
    print ('Expected total files:', total_files)
print ('Total files train+val:', total_train+test_total)
#<NAME>
print('*'*10)
print('Loading file names...')
print('Total positive', len(positive_files))
print('Total negative', len(negative_files))
print('Expected train pos:', total_train_pos)
print('Expected train neg:', total_train_neg)
#train
train_positive_filenames = positive_files[:total_train_pos]
train_negative_filenames = negative_files[:total_train_neg]
train_files = train_positive_filenames + train_negative_filenames
#sanity check
print('>>>>>Sanity check...')
print('Expected train positive:', total_train_pos)
print('Actual train positive:', len(train_positive_filenames))
print('Expected train negative:', total_train_neg)
print('Actual train negative:', len(train_negative_filenames))
print('Expected train:', total_train)
print('Actual files in train_files:', len(train_files))
#val
val_positive_filenames = positive_files[total_train_pos:]
val_negative_filenames = negative_files[total_train_neg:]
val_files = val_positive_filenames + val_negative_filenames
#sanity check
print('>>>>>Sanity check...')
print('Expected val positive:', test_pos)
print('Actual val positive:', len(val_positive_filenames))
print('Expected val negative:', test_neg)
print('Actual val negative:', len(val_negative_filenames))
print('Expected val:', test_total)
print('Actual files in val_files:', len(val_files))
#train_files = positive_files[:total_train_pos] + negative_files[:total_train_neg]
#val_files = positive_files[total_train_pos:] + negative_files[total_train_neg:]
shuffle(train_files)
shuffle(val_files)
#loading images
print('Loading train and val images...')
# Train
print ('Processing training data...')
X_train = []
X_train_names = []
y_train = []
fail_train = []
file_processed = 0
for filename in train_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(train_files))
if positive in filename:
y_train.append([1,0])
elif negative in filename:
y_train.append([0,1])
else: #wrong filename
fail_train.append(filename)
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_train.append(x)
X_train_names.append(filename)
#sanity check
print('Sanity check...')
print('X_train total:', len(X_train))
print('y_train total:', len(y_train))
print('fail_train total:', len(fail_train))
print(fail_train)
#val
print ('Processing validation data...')
X_val = []
X_val_names = []
y_val = []
fail_val = []
file_processed = 0
for filename in val_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(val_files))
if positive in filename:
y_val.append([1,0])
elif negative in filename:
y_val.append([0,1])
else: #wrong filename
fail_val.append(filename)
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_val.append(x)
X_val_names.append(filename)
#sanity check
print('Sanity check...')
print('X_val total:', len(X_val))
print('y_val total:', len(y_val))
print('fail_val total:', len(fail_val))
print(fail_val)
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
print('Shapes train')
print(X_train.shape)
print(y_train.shape)
print('Shapes val')
print(X_val.shape)
print(y_val.shape)
return X_train, y_train, X_train_names, X_val, y_val, X_val_names
def create_sets_by_patients(path, positive, negative, model_name, model_version, model, train_test_divide):
files_covid= os.listdir(path)
total_files = len(files_covid)
print ('Total files in disk:', total_files)
train_files = []
val_files = []
for filename in files_covid:
if any(x in filename for x in TEST_SET_PATIENTS):
val_files.append(filename)
else:
train_files.append(filename)
print('Total train files:', len(train_files))
print('Total test files:', len(val_files))
#loading images
print('Loading train and val images...')
# Train
print ('Processing training data...')
X_train = []
X_train_names = []
y_train = []
fail_train = []
file_processed = 0
for filename in train_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(train_files))
if positive in filename:
y_train.append([1,0])
elif negative in filename:
y_train.append([0,1])
else: #wrong filename
fail_train.append(filename)
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_train.append(x)
X_train_names.append(filename)
#sanity check
print('Sanity check...')
print('X_train total:', len(X_train))
print('y_train total:', len(y_train))
print('fail_train total:', len(fail_train))
print(fail_train)
#val
print ('Processing validation data...')
X_val = []
X_val_names = []
y_val = []
fail_val = []
file_processed = 0
test_pos_total = 0
test_neg_total = 0
for filename in val_files:
file_processed += 1
if file_processed % 300 == 0:
print('Processing ', file_processed, 'of', len(val_files))
if positive in filename:
y_val.append([1,0])
test_pos_total += 1
elif negative in filename:
y_val.append([0,1])
test_neg_total += 1
else: #wrong filename
fail_val.append(filename)
img = image.load_img(path+filename, target_size=(224, 224, 3))
x = image.img_to_array(img)
if (model_name == "mobilenet"):
if (model_version == 'V1'):
x = preprocess_input_v1(x) #mobilenet v1
elif (model_version == 'V2'):
x = preprocess_input_v2(x) #mobilenet v2
X_val.append(x)
X_val_names.append(filename)
#sanity check
print('Sanity check...')
print('X_val total:', len(X_val))
print('y_val total:', len(y_val))
print('fail_val total:', len(fail_val))
print(fail_val)
print('Test positive examples:', test_pos_total)
print((float)(test_pos_total/len(y_val)))
print('Test negative examples:', test_neg_total)
print((float)(test_neg_total/len(y_val)))
X_train = np.array(X_train)
y_train = np.array(y_train)
X_val = np.array(X_val)
y_val = np.array(y_val)
print('Shapes train')
print(X_train.shape)
print(y_train.shape)
print('Shapes val')
print(X_val.shape)
print(y_val.shape)
return X_train, y_train, X_train_names, X_val, y_val, X_val_names
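# A minimal example invocation of this script (hedged: the file name
# "train_model.py" and the data locations below are placeholders, not part of
# the original repository):
#
#   python train_model.py --model mobilenet --model_version V1 \
#       --dataset_path /data/croppedi2p0/ --results_path /data/results/ \
#       --strategy by_patients --epochs 10 --batch_size 32
#
# Every flag maps to an argparse option defined in the __main__ block below.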
if __name__ == '__main__':
# parsing arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model",
type=str,
default='mobilenet',
nargs="?",
help="Model: mobilenet or efficientnet.")
parser.add_argument("--model_version",
type=str,
default='V1',
nargs="?",
help="Mobile net version: V1 or V2. Efficient net scaling: B0, B1, B2, B3, B4, B5, B6 or B7.")
parser.add_argument("--dataset_path",
type=str,
default='/lus_stratification/generate_model/croppedi2p0/',
nargs="?",
help="Dataset's absolute path")
parser.add_argument("--results_path",
type=str,
default='/lus_stratification/generate_model/results/',
nargs="?",
help="Results's absolute path")
parser.add_argument("--train_test_divide",
type=float,
default=0.75,
nargs="?",
help="Train test divide value between 0.0 and 1.0")
parser.add_argument("--epochs",
type=int,
default=10,
nargs="?",
help="Epochs value between 1 and infinite")
parser.add_argument("--batch_size",
type=int,
default=32,
nargs="?",
help="Batch size value")
parser.add_argument("--steps_per_epoch",
type=int,
default=300,
nargs="?",
help="Steps per epoch value")
parser.add_argument("--use_steps_per_epoch",
type=int,
default=0,
nargs="?",
help="Use steps per epoch value: 1 use, other not use. Default 0.")
parser.add_argument("--optimizer",
type=str,
default='adam',
nargs="?",
help="Optimizer")
parser.add_argument("--loss",
type=str,
default='binary_crossentropy',
nargs="?",
help="Loss")
parser.add_argument("--label_dataset_zero",
type=str,
default='N0',
nargs="?",
help="Label dataset 0: N0, B0, M0, S0, C0, P0.")
parser.add_argument("--label_dataset_one",
type=str,
default='N1',
nargs="?",
help="Label dataset 1: N1, B1, M1, S1, C1, P1.")
parser.add_argument("--strategy",
type=str,
default='combined',
nargs="?",
help="Create sets strategy: combined or by_patients.")
parser.add_argument("--random_seed",
type=int,
default=12345,
nargs="?",
help="Random seed for reproducible results")
args = parser.parse_args()
# reproducible results
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(args.random_seed)
seed(args.random_seed)
set_seed(args.random_seed)
    # get the base model without the dense layers
if (args.model == 'mobilenet'):
if (args.model_version == 'V1'):
base_model = MobileNet(weights='imagenet', include_top=False)
elif (args.model_version == 'V2'):
base_model = MobileNetV2(weights='imagenet', include_top=False)
elif (args.model == 'efficientnet'):
if args.model_version == 'B0':
base_model = EfficientNetB0(weights='imagenet', include_top=False)
if args.model_version == 'B1':
base_model = EfficientNetB1(weights='imagenet', include_top=False)
if args.model_version == 'B2':
base_model = EfficientNetB2(weights='imagenet', include_top=False)
if args.model_version == 'B3':
base_model = EfficientNetB3(weights='imagenet', include_top=False)
if args.model_version == 'B4':
base_model = EfficientNetB4(weights='imagenet', include_top=False)
if args.model_version == 'B5':
base_model = EfficientNetB5(weights='imagenet', include_top=False)
if args.model_version == 'B6':
base_model = EfficientNetB6(weights='imagenet', include_top=False)
if args.model_version == 'B7':
base_model = EfficientNetB7(weights='imagenet', include_top=False)
last_layer = base_model.layers[-1]
new_top_layer_global_avg_pooling = GlobalAveragePooling2D()(last_layer.output)
new_dense = Dense(1024, activation='relu')(new_top_layer_global_avg_pooling)
predictions = Dense(2, activation='softmax')(new_dense)
model = Model(base_model.input, predictions)
    # we will only train the new dense layers for the baseline
for layer in base_model.layers:
layer.trainable = False
# compile model
model.compile(optimizer=args.optimizer, loss=args.loss, metrics = ["accuracy"])
# see model structure
model.summary()
# get the data
print('***** Load files...')
if args.strategy == 'combined':
X_train, y_train, X_train_names, X_val, y_val, X_val_names = create_sets(args.dataset_path,
args.label_dataset_zero,
args.label_dataset_one,
args.model,
args.model_version,
model,
args.train_test_divide)
elif args.strategy == 'by_patients':
X_train, y_train, X_train_names, X_val, y_val, X_val_names = create_sets_by_patients(args.dataset_path,
args.label_dataset_zero,
args.label_dataset_one,
args.model,
args.model_version,
model,
args.train_test_divide)
# fit model
if (args.use_steps_per_epoch == 1):
results = model.fit(X_train, y_train, epochs=args.epochs, steps_per_epoch=args.steps_per_epoch, batch_size=args.batch_size, validation_data=(X_val, y_val))
else:
        results = model.fit(X_train, y_train, epochs=args.epochs, batch_size=args.batch_size, validation_data=(X_val, y_val))
#-----------------------------------------------------------------
# ** ATTENTION **
# This code was automatically generated from the file:
# setlx2py/setlx_ast.cfg
#
# Do not modify it directly. Modify the configuration file and
# run the generator again.
# ** ** *** ** **
#
# AST Node classes.
#
# Copyright (C) 2008-2013, <NAME>
# 2013, <NAME>
# License: BSD
#-----------------------------------------------------------------
import sys
class Node(object):
""" Abstract base class for AST nodes.
"""
def children(self):
""" A sequence of all children that are Nodes
"""
pass
def __str__(self):
return self.show()
def __repr__(self):
return str(self.to_tuples())
def to_tuples(self):
result = [self.__class__.__name__]
attr_list = [getattr(self, n) for n in self.attr_names]
result.extend(attr_list)
for (child_name, child) in self.children():
result.append( child.to_tuples() )
return tuple(result)
def show(self,
buf=None,
offset=0,
attrnames=False,
nodenames=False,
showcoord=False,
_my_node_name=None):
""" Pretty print the Node and all its attributes and
children (recursively) to a buffer.
buf:
Open IO buffer into which the Node is printed.
If it is None or let empty, instead a string
is returned
offset:
Initial offset (amount of leading spaces)
attrnames:
True if you want to see the attribute names in
name=value pairs. False to only see the values.
nodenames:
True if you want to see the actual node names
within their parents.
showcoord:
Do you want the coordinates of each Node to be
displayed.
"""
s = ''
lead = ' ' * offset
if nodenames and _my_node_name is not None:
s += lead + self.__class__.__name__+ ' <' + _my_node_name + '>: '
else:
s += lead + self.__class__.__name__+ ': '
if self.attr_names:
if attrnames:
nvlist = [(n, getattr(self,n)) for n in self.attr_names]
attrstr = ', '.join('%s=%s' % nv for nv in nvlist)
else:
vlist = [getattr(self, n) for n in self.attr_names]
attrstr = ', '.join('%s' % v for v in vlist)
s += attrstr
if showcoord: s += ' (at %s)' % self.coord
s += '\n'
for (child_name, child) in self.children():
s += child.show(
buf,
offset=offset + 2,
attrnames=attrnames,
nodenames=nodenames,
showcoord=showcoord,
_my_node_name=child_name)
if buf is None: return s
else: buf.write(s)
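# A minimal sketch of building and pretty-printing a small AST with the node
# classes defined below (hedged: purely illustrative, not part of the generated
# file):
#
#   lhs = Constant('int', '1')
#   rhs = Constant('int', '2')
#   expr = BinaryOp('+', lhs, rhs)
#   print(expr.show(attrnames=True))   # uses Node.show() defined above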
class NodeVisitor(object):
""" A base NodeVisitor class for visiting c_ast nodes.
Subclass it and define your own visit_XXX methods, where
XXX is the class name you want to visit with these
methods.
For example:
class ConstantVisitor(NodeVisitor):
def __init__(self):
self.values = []
def visit_Constant(self, node):
self.values.append(node.value)
Creates a list of values of all the constant nodes
encountered below the given node. To use it:
cv = ConstantVisitor()
cv.visit(node)
Notes:
* generic_visit() will be called for AST nodes for which
no visit_XXX method was defined.
* The children of nodes for which a visit_XXX was
defined will not be visited - if you need this, call
generic_visit() on the node.
You can use:
NodeVisitor.generic_visit(self, node)
* Modeled after Python's own AST visiting facilities
(the ast module of Python 3.0)
"""
def visit(self, node):
""" Visit a node.
"""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
""" Called if no explicit visitor function exists for a
node. Implements preorder visiting of the node.
"""
for c_name, c in node.children():
self.visit(c)
class As (Node):
def __init__(self, expr, coord=None):
self.tags = []
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ()
class Assert(Node):
def __init__(self, cond, expr, coord=None):
self.tags = []
self.cond = cond
self.expr = expr
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.expr is not None: nodelist.append(("expr", self.expr))
return tuple(nodelist)
attr_names = ()
class ArgumentList (Node):
def __init__(self, arguments, coord=None):
self.tags = []
self.arguments = arguments
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.arguments or []):
nodelist.append(("arguments[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Assignment(Node):
def __init__(self, op, target, right, coord=None):
self.tags = []
self.op = op
self.target = target
self.right = right
self.coord = coord
def children(self):
nodelist = []
if self.target is not None: nodelist.append(("target", self.target))
if self.right is not None: nodelist.append(("right", self.right))
return tuple(nodelist)
attr_names = ('op',)
class AttributeRef (Node):
def __init__(self, obj, field, coord=None):
self.tags = []
self.obj = obj
self.field = field
self.coord = coord
def children(self):
nodelist = []
if self.obj is not None: nodelist.append(("obj", self.obj))
if self.field is not None: nodelist.append(("field", self.field))
return tuple(nodelist)
attr_names = ()
class BinaryOp(Node):
def __init__(self, op, left, right, coord=None):
self.tags = []
self.op = op
self.left = left
self.right = right
self.coord = coord
def children(self):
nodelist = []
if self.left is not None: nodelist.append(("left", self.left))
if self.right is not None: nodelist.append(("right", self.right))
return tuple(nodelist)
attr_names = ('op',)
class Backtrack(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class Block(Node):
def __init__(self, stmts, coord=None):
self.tags = []
self.stmts = stmts
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.stmts or []):
nodelist.append(("stmts[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Break(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class Call (Node):
def __init__(self, name, args, coord=None):
self.tags = []
self.name = name
self.args = args
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.args is not None: nodelist.append(("args", self.args))
return tuple(nodelist)
attr_names = ()
class Case(Node):
def __init__(self, cond, body, coord=None):
self.tags = []
self.cond = cond
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class CaseList (Node):
def __init__(self, cases, coord=None):
self.tags = []
self.cases = cases
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.cases or []):
nodelist.append(("cases[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class CatchClause(Node):
def __init__(self, type, name, block, coord=None):
self.tags = []
self.type = type
self.name = name
self.block = block
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.block is not None: nodelist.append(("block", self.block))
return tuple(nodelist)
attr_names = ('type',)
class Catches (Node):
def __init__(self, clauses, coord=None):
self.tags = []
self.clauses = clauses
self.coord = coord
def children(self):
nodelist = []
for i, child in enumerate(self.clauses or []):
nodelist.append(("clauses[%d]" % i, child))
return tuple(nodelist)
attr_names = ()
class Check(Node):
def __init__(self, block, coord=None):
self.tags = []
self.block = block
self.coord = coord
def children(self):
nodelist = []
if self.block is not None: nodelist.append(("block", self.block))
return tuple(nodelist)
attr_names = ()
class Class(Node):
def __init__(self, name, params, block, static, coord=None):
self.tags = []
self.name = name
self.params = params
self.block = block
self.static = static
self.coord = coord
def children(self):
nodelist = []
if self.name is not None: nodelist.append(("name", self.name))
if self.params is not None: nodelist.append(("params", self.params))
if self.block is not None: nodelist.append(("block", self.block))
if self.static is not None: nodelist.append(("static", self.static))
return tuple(nodelist)
attr_names = ()
class Comprehension (Node):
def __init__(self, klass, expr, iterators, cond, coord=None):
self.tags = []
self.klass = klass
self.expr = expr
self.iterators = iterators
self.cond = cond
self.coord = coord
def children(self):
nodelist = []
if self.expr is not None: nodelist.append(("expr", self.expr))
if self.iterators is not None: nodelist.append(("iterators", self.iterators))
if self.cond is not None: nodelist.append(("cond", self.cond))
return tuple(nodelist)
attr_names = ('klass',)
class Continue(Node):
def __init__(self, coord=None):
self.tags = []
self.coord = coord
def children(self):
return ()
attr_names = ()
class Constant(Node):
def __init__(self, klass, value, coord=None):
self.tags = []
self.klass = klass
self.value = value
self.coord = coord
def children(self):
nodelist = []
return tuple(nodelist)
attr_names = ('klass','value',)
class Default(Node):
def __init__(self, body, coord=None):
self.tags = []
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class DoWhile(Node):
def __init__(self, cond, body, coord=None):
self.tags = []
self.cond = cond
self.body = body
self.coord = coord
def children(self):
nodelist = []
if self.cond is not None: nodelist.append(("cond", self.cond))
if self.body is not None: nodelist.append(("body", self.body))
return tuple(nodelist)
attr_names = ()
class Exit(Node):
def __init__(self, coord=None):
        self.tags = []
        self.coord = coord
    def children(self):
        return ()
    attr_names = ()
"""Circuit visualization via the pyx package
This requires a working LaTeX installation.
"""
try:
import pyx
except ImportError as e:
print("PyX is not installed. Please install PyX for circuit visualization purposes.")
raise e
import shutil
import qnet.algebra.core.circuit_algebra as ca
from qnet.printing import latex as tex # TODO tex -> latex
from qnet.algebra.core.circuit_algebra import Component
__all__ = ['draw_circuit_canvas', 'draw_circuit']
pyx.text.set(pyx.text.LatexRunner)
pyx.text.preamble(r'\usepackage{amsmath,amssymb}')
HUNIT = +4 # Basic unit for the width of a single Circuit object
# the positive value corresponds to visualizing the channel
# 'flow' from left to right
VUNIT = -1. # Basic unit for the height of a single Circuit object,
# the negative value makes the effective y-axis point downwards
RHMARGIN = .1 # Relative horizontal margin between gridline and Circuit object
RVMARGIN = .2 # Relative vertical margin between gridline and Circuit object
RPLENGTH = .4 # Relative width of a channel permutation
GS_CANDIDATES = ['gs', 'mgs', 'rungs', 'gswin32c']
for gs in GS_CANDIDATES:
if shutil.which(gs) is not None:
GS = gs
break
else:
GS = None
# helper function
def _curve(x1, y1, x2, y2, hunit = HUNIT, vunit = VUNIT):
"""
Return a PyX curved path from (x1, y1) to (x2, y2),
such that the slope at either end is zero.
"""
ax1, ax2, axm = x1 * hunit, x2 * hunit, (x1 + x2) * hunit / 2
ay1, ay2 = y1 * vunit, y2 * vunit
return pyx.path.curve(ax1, ay1, axm, ay1, axm, ay2, ax2, ay2)
def draw_circuit_canvas(circuit, hunit = HUNIT, vunit = VUNIT, rhmargin = RHMARGIN, rvmargin = RVMARGIN, rpermutation_length = RPLENGTH, draw_boxes = True, permutation_arrows = False):
"""
Generate a PyX graphical representation of a circuit expression object.
:param circuit: The circuit expression
:type circuit: ca.Circuit
:param hunit: The horizontal length unit, default = ``HUNIT``
:type hunit: float
:param vunit: The vertical length unit, default = ``VUNIT``
:type vunit: float
:param rhmargin: relative horizontal margin, default = ``RHMARGIN``
:type rhmargin: float
:param rvmargin: relative vertical margin, default = ``RVMARGIN``
:type rvmargin: float
:param rpermutation_length: the relative length of a permutation circuit, default = ``RPLENGTH``
:type rpermutation_length: float
:param draw_boxes: Whether to draw indicator boxes to denote subexpressions (Concatenation, SeriesProduct, etc.), default = ``True``
:type draw_boxes: bool
:param permutation_arrows: Whether to draw arrows within the permutation visualization, default = ``False``
:type permutation_arrows: bool
:return: A PyX canvas object that can be further manipulated or printed to an output image.
:rtype: pyx.canvas.canvas
"""
if not isinstance(circuit, ca.Circuit):
raise ValueError()
nc = circuit.cdim
c = pyx.canvas.canvas()
if circuit is ca.CIdentity:
# simply create a line going through
c.stroke(pyx.path.line(0, vunit/2, hunit, vunit/2))
return c, (1, 1), (.5,), (.5,)
elif isinstance(circuit, (ca.CircuitSymbol, ca.SeriesInverse, ca.SLH, Component)):
# draw box
b = pyx.path.rect(rhmargin * hunit, rvmargin * vunit, hunit - 2 * rhmargin * hunit, nc * vunit - 2 * rvmargin * vunit)
c.stroke(b)
texstr = "${}$".format(tex(circuit) if not isinstance(circuit, ca.SLH) else r"{{\rm SLH}}_{{{}}}".format(tex(circuit.space)))
# draw symbol name
c.text(hunit/2., nc * vunit/2., texstr , [pyx.text.halign.boxcenter, pyx.text.valign.middle])
# draw connectors at half-unit positions
connector_positions = tuple((.5 + k) for k in range(nc))
for y in connector_positions:
c.stroke(pyx.path.line(0, y * vunit, rhmargin * hunit, y * vunit), [pyx.deco.earrow()])
c.stroke(pyx.path.line(hunit * (1 - rhmargin), y * vunit, hunit, y * vunit))
return c, (1, nc), connector_positions, connector_positions
elif isinstance(circuit, ca.CPermutation):
permutation = circuit.permutation
connector_positions = tuple((k + 0.5) for k in range(nc))
target_positions = [connector_positions[permutation[k]] for k in range(nc)]
# draw curves
for y1, y2 in zip(connector_positions, target_positions):
if permutation_arrows:
c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit), [pyx.deco.earrow()])
else:
c.stroke(_curve(0, y1, rpermutation_length, y2, hunit = hunit, vunit = vunit))
if draw_boxes:
b = pyx.path.rect(.5* rhmargin * hunit, .5* rvmargin * vunit, rpermutation_length * hunit - rhmargin * hunit, nc * vunit - rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.green])
return c, (rpermutation_length, nc), connector_positions, connector_positions
elif isinstance(circuit, ca.SeriesProduct):
assert len(circuit.operands) > 1
        # generate graphics of operand subsystems
sub_graphics = [draw_circuit_canvas(op, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows) for op in reversed(circuit.operands)]
# set up first one
previous_csub, previous_dims, previous_c_in, previous_c_out = sub_graphics[0]
hoffset = 0
c.insert(previous_csub)
hoffset += previous_dims[0]
max_height = previous_dims[1]
# this will later become the full series in-port coordinate tuple
first_c_in = previous_c_in
# now add all other operand subsystems
for csub, dims, c_in, c_out in sub_graphics[1:]:
assert dims[1] >= 0
max_height = max(dims[1], max_height)
if previous_c_out != c_in: # vertical port locations don't agree, map signals correspondingly
x1 = hoffset
x2 = hoffset + rpermutation_length
# draw connection curves
for y1, y2 in zip(previous_c_out, c_in):
c.stroke(_curve(x1, y1, x2, y2, hunit = hunit, vunit = vunit))
hoffset += rpermutation_length
previous_c_in, previous_c_out = c_in, c_out
# now insert current system
c.insert(csub, [pyx.trafo.translate(hunit * hoffset, 0)])
hoffset += dims[0]
if draw_boxes:
b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, hoffset * hunit - 1. * rhmargin * hunit, max_height * vunit + rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.red])
return c, (hoffset, max_height), first_c_in, c_out
elif isinstance(circuit, ca.Concatenation):
voffset = 0
total_cin, total_cout = (), ()
widths = [] # stores the component width for each channel(!)
# generate all operand subsystem graphics and stack them vertically
for op in circuit.operands:
csub, dims, c_in, c_out = draw_circuit_canvas(op, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
            # add appropriate offsets to vertical port coordinates
total_cin += tuple(y + voffset for y in c_in)
total_cout += tuple(y + voffset for y in c_out)
c.insert(csub, [pyx.trafo.translate(0, vunit * voffset)])
# keep track of width in all channel for this subsystem
widths += [dims[0]] * op.cdim
voffset += dims[1]
max_width = max(widths)
if max_width > min(widths): # components differ in width => we must extend the narrow component output lines
for x,y in zip(widths, total_cout):
if x == max_width:
continue
ax, ax_to = x * hunit, max_width * hunit
ay = y * vunit
c.stroke(pyx.path.line(ax, ay, ax_to, ay))
if draw_boxes:
b = pyx.path.rect(.5 * rhmargin * hunit, .5 * rvmargin * vunit, max_width * hunit - 1. * rhmargin * hunit, voffset * vunit - rvmargin * vunit)
c.stroke(b, [pyx.style.linewidth.thin, pyx.style.linestyle.dashed, pyx.color.rgb.blue])
return c, (max_width, voffset), total_cin, total_cout
elif isinstance(circuit, ca.Feedback):
# generate and insert graphics of subsystem
csub, dims, c_in, c_out = draw_circuit_canvas(circuit.operand, hunit = hunit,
vunit = vunit, rhmargin = rhmargin,
rvmargin = rvmargin,
rpermutation_length = rpermutation_length,
draw_boxes = draw_boxes,
permutation_arrows = permutation_arrows)
c.insert(csub, [pyx.trafo.translate(hunit * .5 * rhmargin, 0)])
width, height = dims
# create feedback loop
fb_out, fb_in = circuit.out_in_pair
out_coords = (width + .5 * rhmargin) * hunit, c_out[fb_out] * vunit
in_coords = .5 * rhmargin * hunit, c_in[fb_in] * vunit
upper_y = (height) * vunit
feedback_line = pyx.path.path(pyx.path.moveto(*out_coords), pyx.path.lineto(out_coords[0], upper_y),
pyx.path.lineto(in_coords[0], upper_y), pyx.path.lineto(*in_coords))
c.stroke(feedback_line)
# remove feedback port coordinates
new_c_in = c_in[:fb_in] + c_in[fb_in + 1 :]
new_c_out = c_out[:fb_out] + c_out[fb_out + 1 :]
# extend port connectors a little bit outward,
# such that the feedback loop is not at the edge anymore
for y in new_c_in:
c.stroke(pyx.path.line(0, y * vunit, .5 * rhmargin * hunit, y * vunit))
for y in new_c_out:
c.stroke(pyx.path.line((width + .5 * rhmargin) * hunit, y * vunit, (width + rhmargin) * hunit, y * vunit))
return c, (width + rhmargin, height + rvmargin), new_c_in, new_c_out
raise Exception('Visualization not implemented for type %s' % type(circuit))
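# A minimal usage sketch (hedged: the exact CircuitSymbol constructor signature
# differs between QNET versions, so ca.CircuitSymbol('A', cdim=2) is an assumed
# spelling rather than a guaranteed API):
#
#   A = ca.CircuitSymbol('A', cdim=2)
#   B = ca.CircuitSymbol('B', cdim=2)
#   circuit = B << A                      # series product, drawn left to right
#   draw_circuit(circuit, 'circuit.pdf')  # draw_circuit is defined below
#
# draw_circuit_canvas() returns the raw pyx canvas plus port metadata if the
# image needs further manipulation before writing it out.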
def draw_circuit(circuit, filename, direction = 'lr',
hunit = HUNIT, vunit = VUNIT,
rhmargin = RHMARGIN, rvmargin = RVMARGIN,
rpermutation_length = RPLENGTH,
draw_boxes = True,
permutation_arrows = False):
"""
Generate a graphic representation of circuit and store them in a file.
The graphics format is determined from the file extension.
:param circuit: The circuit expression
:type circuit: ca.Circuit
:param filename: A filepath to store the output image under. The file name suffix determines the output graphics format
:type filename: str
    :param direction: The horizontal direction of the channel flow, e.g. 'lr'
        for left to right.
from __future__ import annotations
from typing import Union, Optional
import subprocess
from .option import EmulatorOptions
from .args import subprocess_args
from .opencv import get_pos_img
from .node import Node, By
import time
import os
import base64
import ctypes
import re
# type
position = Union[list[int, int], tuple[int, int]]
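# A minimal usage sketch (hedged: how the parent controller hands out instances
# is not shown in this file, so `parent.emulators[0]` below is a hypothetical
# accessor used purely for illustration):
#
#   emu = parent.emulators[0]
#   emu.start(wait=True).run_app("com.example.app")
#   emu.tap((100, 200)).send_text("hello")
#   if emu.error:
#       print(emu.error)
#
# Most methods return self, so calls can be chained; check `error` afterwards.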
class ObjectEmulator:
sed = os.path.join(os.path.dirname(__file__), "sed", "sed.exe")
def __init__(
self,
parent,
index: int,
name: str,
top_hwnd: int = 0,
bind_hwnd: int = 0,
android: int = 0,
pid: int = -1,
pid_vbox: int = -1
) -> None:
self._parent = parent
self._controller = parent.controller
self._index = index
self._name = name
self._top_hwnd = top_hwnd
self._bind_hwnd = bind_hwnd
self._android = android
self._pid = pid
self._pid_vbox = pid_vbox
self._this = "--index " + str(self._index)
self._error = ""
        self._dump = os.path.join(os.path.normpath(os.path.dirname(__file__)), "dump", str(self._index) + ".xml")
@property
def parent(self):
        return self._parent
@property
def index(self):
return self._index
@property
def name(self):
return self._name
@property
def top_hwnd(self):
return self._top_hwnd
@property
def bind_hwnd(self):
return self._bind_hwnd
@property
def pid(self):
return self._pid
@property
def pid_vbox(self):
return self._pid_vbox
@property
def error(self):
return self._error
def start(self, wait: bool = True):
if not self.is_running():
self._run_cmd(f'{self._controller} launch {self._this}')
if wait:
self.wait_to_started()
self._update()
else:
while not self._update():
time.sleep(.5)
else:
self._update()
return self
def _update(self) -> bool:
cmd = f'{self._controller} list2 | "{self.sed}" -n "/^{self._index},/p"'
args = self._run_cmd(cmd).split("\r\n")[0].split(",")
if args[2] != "0":
self._top_hwnd = int(args[2])
self._bind_hwnd = int(args[3])
self._android = int(args[4])
self._pid = int(args[5])
self._pid_vbox = int(args[6])
return True
return False
def wait_to_started(self, timeout: float = 60):
timer = time.perf_counter()
while (timeout == 0 or time.perf_counter() - timer < timeout) and not self.adb_connected():
time.sleep(1)
return self
def is_running(self) -> bool:
cmd = f'{self._controller} isrunning {self._this}'
return self._run_cmd(cmd) == "running"
def restart(self, wait: bool = True):
if self.is_running():
cmd = f'{self._controller} reboot {self._this}'
self._run_cmd(cmd)
if wait:
self.wait_to_started()
else:
time.sleep(3)
self._update()
self._error = ""
else:
self._error = "emulator is not running"
return self
def rename(self, new_name: str):
cmd = f'{self._controller} rename {self._this} --title "{new_name}"'
self._error = self._run_cmd(cmd)
if not self._error:
self._name = new_name
return self
def install_app(self, source: str):
if self.is_running():
path = os.path.normpath(source)
if os.path.isfile(path):
cmd = f'{self._controller} installapp {self._this} --filename "{path}"'
else:
cmd = f'{self._controller} installapp {self._this} --packagename "{source}"'
self._error = self._run_cmd(cmd)
else:
self._error = "The emulator is not started!"
return self
def uninstall_app(self, package_name: str):
if self.is_running():
cmd = f'{self._controller} uninstallapp {self._this} --packagename "{package_name}"'
self._error = self._run_cmd(cmd)
else:
self._error = "The emulator is not started!"
return self
def run_app(self, package_name: str):
if self.is_running():
cmd = f'{self._controller} runapp {self._this} --packagename "{package_name}"'
self._error = self._run_cmd(cmd)
else:
self._error = "The emulator is not started!"
return self
def kill_app(self, package_name: str):
if self.is_running():
cmd = f'{self._controller} killapp {self._this} --packagename "{package_name}"'
self._error = self._run_cmd(cmd)
else:
self._error = "The emulator is not started!"
return self
def clear_app(self, package_name: str):
cmd = f'shell pm clear "{package_name}"'
self._error = self._run_adb(cmd)
return self
def list_packages(self) -> Optional[list]:
cmd = f"shell pm list packages | sed 's/^package://'"
return [package for package in self._run_adb(cmd).split("\r\r\n")[:-1]]
def set_locate(self, locate: str):
if self.is_running():
cmd = f'{self._controller} locate {self._this} --LLI "{locate}"'
self._error = self._run_cmd(cmd)
else:
self._error = "The emulator is not started!"
return self
def update_properties(self, prop: dict):
for key in prop.keys():
cmd = f'{self._controller} setprop {self._this} --key "{key}" --value "{prop[key]}"'
self._error = self._run_cmd(cmd)
if self._error:
break
return self
def get_properties(self) -> dict:
cmd = f"shell getprop | sed 's/[][]//g'"
lst = self._run_adb(cmd).split("\r\r\n")[:-1]
return {key: value for key, value in [x.split(": ") for x in lst]}
def down_cpu(self, rate: int):
if rate < 0:
rate = 0
if rate > 100:
rate = 100
cmd = f'{self._controller} downcpu {self._this} --rate {int(rate)}'
self._error = self._run_cmd(cmd)
return self
def backup(self, file_path: str):
path = os.path.normpath(file_path)
cmd = f'{self._controller} backup {self._this} --file "{path}"'
self._error = self._run_cmd(cmd)
return self
def restore(self, file_path: str):
path = os.path.normpath(file_path)
if os.path.isfile(path):
cmd = f'{self._controller} restore {self._this} --file "{path}"'
self._error = self._run_cmd(cmd)
else:
self._error = f'Path "{file_path}" invalid!'
return self
def action(self, actions: dict):
for key in actions.keys():
cmd = f'{self._controller} action {self._this} --key "{key}" --value "{actions[key]}"'
self._error = self._run_cmd(cmd)
if self._error:
break
return self
def scan(self, file_path: str):
if self.is_running():
path = os.path.normpath(file_path)
if os.path.isfile(path):
cmd = f'{self._controller} scan {self._this} --file "{path}"'
self._error = self._run_cmd(cmd)
else:
self._error = f'Path "{file_path}" invalid!'
else:
self._error = "emulator is not running"
return self
def pull(self, remote: str, local: str):
lo_path = os.path.normpath(local)
cmd = f'pull "{remote}" "{lo_path}"'
out = self._run_adb(cmd)
if "KB/s" not in out:
self._error = out
else:
self._error = ""
return self
def push(self, local: str, remote: str):
lo_path = os.path.normpath(local)
cmd = f'push "{lo_path}" "{remote}"'
out = self._run_adb(cmd)
if "KB/s" not in out:
self._error = out
else:
self._error = ""
return self
def capture(self, as_file):
path = os.path.normpath(as_file)
b_img = self._get_screencap_b64decode()
if b_img:
with open(path, mode="wb") as file:
file.write(b_img)
self._error = ""
return self
def quit(self) -> None:
cmd = f'{self._controller} quit {self._this}'
self._run_cmd(cmd)
def setting(self, options: EmulatorOptions):
opts = " ".join([f"{key} {options.options[key]}" for key in options.options.keys()])
if opts:
cmd = f'{self._controller} modify {self._this} {opts}'
self._error = self._run_cmd(cmd)
return self
def adb_connected(self) -> bool:
cmd = f'{self._controller} adb {self._this} --command "get-state"'
return self._run_cmd(cmd)[:-3] == "device"
def tap(self, *pos: position):
for p in pos:
cmd = f'shell input tap {p[0]} {p[1]}'
self._error = self._run_adb(cmd)
return self
def swipe(self, _from: position, to: position, duration: int = 100):
cmd = f'shell input swipe {_from[0]} {_from[1]} {to[0]} {to[1]} {duration}'
self._error = self._run_adb(cmd)
return self
def send_text(self, text: str):
cmd = f'shell input text "{text.replace(" ", r"%s")}"'
self._error = self._run_adb(cmd)
return self
def send_event(self, keycode: int):
cmd = f'shell input keyevent {keycode}'
self._error = self._run_adb(cmd)
return self
def home(self):
return self.send_event(3)
def back(self):
return self.send_event(4)
def app_switcher(self):
return self.send_event(187)
def tap_to_img(self, img_path: str, timeout: float = 0, threshold: float = 0.8):
self._error = ""
path = os.path.normpath(img_path)
if os.path.isfile(path):
if timeout == 0:
screen = self._get_screencap_b64decode()
if not screen:
return self
pos = get_pos_img(path, screen, threshold=threshold)
else:
pos = self._wait_img_and_get_pos(path, 0 if timeout < 0 else timeout, threshold, False)
if pos:
self.tap(pos[0])
else:
self._error = "image not in screen"
else:
self._error = f'The path "{img_path}" invalid!'
return self
def tap_to_imgs(self, img_path: str, timeout: float = 0, threshold: float = 0.8):
self._error = ""
path = os.path.normpath(img_path)
if os.path.isfile(path):
if timeout == 0:
screen = self._get_screencap_b64decode()
if not screen:
return self
pos = get_pos_img(path, screen, multi=True, threshold=threshold)
else:
pos = self._wait_img_and_get_pos(path, 0 if timeout < 0 else timeout, threshold, True)
if pos:
self.tap(*pos)
else:
self._error = "image not in screen"
else:
self._error = f'The path "{img_path}" invalid!'
return self
def wait_img_existed(self, img_path: str, timeout: float = 0, threshold: float = 0.8):
self._error = ""
path = os.path.normpath(img_path)
if os.path.isfile(path):
self._wait_img_and_get_pos(path, timeout, threshold, False)
else:
self._error = f'The path "{img_path}" invalid'
return self
def _get_screencap_b64decode(self) -> Optional[bytes]:
        if self.adb_connected():
out = self._run_cmd(f'{self._controller} adb {self._this} --command "shell screencap -p | base64"')
try:
return base64.b64decode(out.replace("\r\r\n", "\n"))
except Exception:
print("output capture error:", out)
return
self._error = "adb is not connected"
def _wait_img_and_get_pos(self, img_path: str, timeout: float, threshold: float, multi: bool):
screen = self._get_screencap_b64decode()
if screen:
timer = time.perf_counter()
pos = get_pos_img(obj=img_path, _in=screen, threshold=threshold, multi=multi)
while not pos:
screen = self._get_screencap_b64decode()
if not screen:
return
pos = get_pos_img(obj=img_path, _in=screen, threshold=threshold, multi=multi)
if timeout != 0 and time.perf_counter() - timer > timeout:
self._error = "Timeout"
return
return pos
def dump_xml(self, as_file: str):
path = os.path.normpath(as_file)
cmd = 'shell uiautomator dump /sdcard/window_dump.xml'
self._run_adb(cmd)
self.pull("/sdcard/window_dump.xml", path)
return self
def find_node(self, by: int, value: str) -> Optional[Node]:
self.dump_xml(self._dump)
with open(self._dump, mode="r", encoding="utf-8") as file:
xml = file.read()
if by == By.TEXT:
node = re.search(r'(?<=<node )index="\d+" text="%s".*?(?=/>|>)' % value, xml)
if node:
return self._create_node(node.group())
elif by == By.RESOURCE_ID:
node = re.search(r'(?<=<node )index="\d+" text=".*" resource-id="%s".*?(?=/>|>)' % value, xml)
if node:
return self._create_node(node.group())
elif by == By.CLASS:
node = re.search(r'(?<=<node )index="\d+" text=".*" resource-id=".*" class="%s".*?(?=/>|>)' % value, xml)
if node:
return self._create_node(node.group())
elif by == By.PACKAGE:
node = re.search(r'(?<=<node )index="\d+" text=".*" resource-id=".*" class=".*" package="%s".*?(?=/>|>)' % value, xml)
if node:
return self._create_node(node.group())
def find_nodes(self, by: int, value: str) -> list[Node]:
self.dump_xml(self._dump)
        with open(self._dump, mode="r", encoding="utf-8") as file:
# The MIT License (MIT)
#
# Copyright (c) 2021
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
from contextlib import contextmanager
import importlib
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import pycuda.driver
from pycuda.gl import graphics_map_flags
from glumpy import app, gloo, gl
from torch.profiler import profile, record_function, ProfilerActivity
import neuralff
from neuralff.model import BasicNetwork
import neuralff.ops as nff_ops
from scipy import sparse
import scipy.sparse.linalg as linalg
import cv2
import skimage
import imageio
import tqdm
import argparse
def load_rgb(path):
img = imageio.imread(path)
img = skimage.img_as_float32(img)
img = img[:,:,:3]
#img -= 0.5
#img *= 2.
#img = img.transpose(2, 0, 1)
img = img.transpose(1, 0, 2)
return img
@contextmanager
def cuda_activate(img):
"""Context manager simplifying use of pycuda.gl.RegisteredImage"""
mapping = img.map()
yield mapping.array(0,0)
mapping.unmap()
def create_shared_texture(w, h, c=4,
map_flags=graphics_map_flags.WRITE_DISCARD,
dtype=np.uint8):
"""Create and return a Texture2D with gloo and pycuda views."""
tex = np.zeros((h,w,c), dtype).view(gloo.Texture2D)
tex.activate() # force gloo to create on GPU
tex.deactivate()
cuda_buffer = pycuda.gl.RegisteredImage(int(tex.handle), tex.target, map_flags)
return tex, cuda_buffer
backend = "glumpy.app.window.backends.backend_glfw"
importlib.import_module(backend)
def load_rgb(path):
img = imageio.imread(path)
img = skimage.img_as_float32(img)
img = img[:,:,:3]
return img
def resize_rgb(img, height, width, interpolation=cv2.INTER_LINEAR):
img = cv2.resize(img, dsize=(height, width), interpolation=interpolation)
return img
class InteractiveApp(sys.modules[backend].Window):
#def __init__(self, render_res=[720, 1024]):
#def __init__(self, render_res=[100, 200]):
def __init__(self, args):
self.args = args
self.rgb = torch.from_numpy(load_rgb(self.args.image_path)).cuda()
render_res = self.rgb.shape[:2]
self.render_res = render_res
print("Controls:")
print("h,l: switch optimization modes")
print("j,k: switch display buffer")
print("q : quit simulation")
print("n : begin simulation")
super().__init__(width=render_res[1], height=render_res[0],
fullscreen=False, config=app.configuration.get_default())
import pycuda.gl.autoinit
import pycuda.gl
assert torch.cuda.is_available()
print('using GPU {}'.format(torch.cuda.current_device()))
self.buffer = torch.zeros(*render_res, 4, device='cuda')
self.camera_origin = np.array([2.5, 2.5, 2.5])
self.world_transform = np.eye(3)
self.tex, self.cuda_buffer = create_shared_texture(self.width, self.height, 4)
vertex = """
uniform float scale;
attribute vec2 position;
attribute vec2 texcoord;
varying vec2 v_texcoord;
void main()
{
v_texcoord = texcoord;
gl_Position = vec4(scale*position, 0.0, 1.0);
} """
fragment = """
uniform sampler2D tex;
varying vec2 v_texcoord;
void main()
{
gl_FragColor = texture2D(tex, v_texcoord);
} """
self.screen = gloo.Program(vertex, fragment, count=4)
self.screen['position'] = [(-1,-1), (-1,+1), (+1,-1), (+1,+1)]
self.screen['texcoord'] = [(0,0), (0,1), (1,0), (1,1)]
self.screen['scale'] = 1.0
self.screen['tex'] = self.tex
#self.mode = "stable_fluids"
self.mode = "neuralff"
self.display_modes = ["rgb", "pressure", "velocity", "rho", "divergence", "euler"]
self.display_mode_idx = 0
self.display_mode = self.display_modes[self.display_mode_idx]
self.optim_modes = ["euler", "divergence-free", "split"]
self.optim_mode_idx = 0
self.optim_mode = self.optim_modes[self.optim_mode_idx]
self.max_euler_error = 0.0
self.max_divergence_error = 0.0
self.curr_error = 0.0
self.optim_switch = False
self.begin_switch = False
def on_draw(self, dt):
title = f"FPS: {self.fps:.3f}"
title += f" Buffer: {self.display_mode}"
if self.display_mode == "divergence" or self.display_mode == "euler":
title += f" Error: {self.curr_error:.3e}"
title += f" Optimizing: {self.optim_mode}"
self.set_title(title.encode("ascii"))
tex = self.screen['tex']
h,w = tex.shape[:2]
# render with pytorch
state = torch.zeros(*self.render_res, 4, device='cuda')
coords = nff_ops.normalized_grid_coords(*self.render_res)
out = self.render(coords)
write_dim = out.shape[-1]
state[...,:write_dim] = out
state[...,3] = 1
state = torch.flip(state, [0])
img = (255*state).byte().contiguous()
# copy from torch into buffer
assert tex.nbytes == img.numel()*img.element_size()
with cuda_activate(self.cuda_buffer) as ary:
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_device(img.data_ptr())
cpy.set_dst_array(ary)
cpy.width_in_bytes = cpy.src_pitch = cpy.dst_pitch = tex.nbytes//h
cpy.height = h
cpy(aligned=False)
torch.cuda.synchronize()
# draw to screen
self.clear()
self.screen.draw(gl.GL_TRIANGLE_STRIP)
def on_close(self):
pycuda.gl.autoinit.context.pop()
####################################
# Application specific code
####################################
def on_key_press(self, symbol, modifiers):
if symbol == 75: # k
self.display_mode_idx = (self.display_mode_idx + 1) % len(self.display_modes)
self.display_mode = self.display_modes[self.display_mode_idx]
elif symbol == 74: # j
self.display_mode_idx = (self.display_mode_idx - 1) % len(self.display_modes)
self.display_mode = self.display_modes[self.display_mode_idx]
elif symbol == 81: # q
self.close()
elif symbol == 78: # n
self.optim_switch = not self.optim_switch
elif symbol == 76: # l
self.optim_mode_idx = (self.optim_mode_idx + 1) % len(self.optim_modes)
self.optim_mode = self.optim_modes[self.optim_mode_idx]
elif symbol == 72: # h
self.optim_mode_idx = (self.optim_mode_idx - 1) % len(self.optim_modes)
self.optim_mode = self.optim_modes[self.optim_mode_idx]
elif symbol == 66: # b
self.begin_switch = not self.begin_switch
def init_state(self):
#self.gravity = 1e-4
#self.timestep = 1e-1
#self.timestep = 5e-2
#self.timestep = 1e-5
#self.timestep = 5e-3
#self.timestep = 5e-2
self.timestep = 1e-1
self.image_coords = nff_ops.normalized_grid_coords(self.height, self.width, aspect=False, device="cuda")
self.image_coords[...,1] *= -1
if self.mode == "stable_fluids":
self.grid_width = self.width // 8
self.grid_height = self.height // 8
elif self.mode == "neuralff":
velocity_field_config = {
"input_dim" : 2,
"output_dim" : 2,
"hidden_activation" : torch.sin,
"output_activation" : None,
"bias" : True,
"num_layers" : 4,
"hidden_dim" : 128,
}
self.velocity_field = neuralff.NeuralField(**velocity_field_config).cuda()
pressure_field_config = {
"input_dim" : 2,
"output_dim" : 1,
"hidden_activation" : torch.sin,
"output_activation" : None,
"bias" : True,
"num_layers" : 4,
"hidden_dim" : 128,
}
self.pressure_field = neuralff.NeuralField(**pressure_field_config).cuda()
self.rho_field = neuralff.ImageDensityField(self.height, self.width)
self.rho_field.update(self.rgb)
self.pc_lr = self.args.pc_lr
self.precondition_optimizer = optim.Adam([
{"params": self.velocity_field.parameters(), "lr":self.pc_lr},
{"params": self.pressure_field.parameters(), "lr":self.pc_lr},
])
self.lr = self.args.lr
self.optimizer = optim.Adam([
#self.optimizer = optim.SGD([
{"params": self.velocity_field.parameters(), "lr":self.lr},
{"params": self.pressure_field.parameters(), "lr":self.lr},
])
if self.args.precondition:
self.precondition()
def precondition(self):
num_batch = self.args.pc_num_batch
batch_size = self.args.pc_batch_size
epochs = self.args.pc_epochs
pts = torch.rand([batch_size*num_batch, 2], device='cuda') * 2.0 - 1.0
initial_velocity = self.velocity_field.sample(pts).detach()
print("Preconditioning body forces...")
for i in tqdm.tqdm(range(epochs)):
for j in range(num_batch):
self.velocity_field.zero_grad()
self.pressure_field.zero_grad()
loss = nff_ops.body_forces_loss(
pts[j*batch_size:(j+1)*batch_size],
self.velocity_field, self.timestep,
initial_velocity=initial_velocity[j*batch_size:(j+1)*batch_size])
loss = loss.mean()
loss.backward()
self.precondition_optimizer.step()
print("Preconditioning divergence...")
for i in tqdm.tqdm(range(epochs)):
for j in range(num_batch):
self.velocity_field.zero_grad()
self.pressure_field.zero_grad()
loss = nff_ops.divergence_free_loss(
pts[j*batch_size:(j+1)*batch_size],
self.velocity_field)
loss = loss.mean()
loss.backward()
self.precondition_optimizer.step()
initial_velocity = self.velocity_field.sample(pts).detach()
print("Preconditioning Euler...")
for i in tqdm.tqdm(range(epochs)):
for j in range(num_batch):
self.velocity_field.zero_grad()
self.pressure_field.zero_grad()
loss = nff_ops.euler_loss(
pts[j*batch_size:(j+1)*batch_size],
self.velocity_field,
self.pressure_field, self.rho_field, self.timestep,
initial_velocity=initial_velocity[j*batch_size:(j+1)*batch_size])
loss = loss.mean()
loss.backward()
self.precondition_optimizer.step()
def render(self, coords):
self.optimizer = optim.Adam([
#self.optimizer = optim.SGD([
{"params": self.velocity_field.parameters(), "lr":self.lr},
{"params": self.pressure_field.parameters(), "lr":self.lr},
])
if self.mode == "stable_fluids":
# Add external forces
self.velocity_field.vector_field[..., 1] += 9.8 * self.timestep * self.gravity
# Remove divergence
#self.velocities = remove_divergence(self.velocities, self.x_mapper, self.y_mapper)
elif self.optim_switch:
for i in range(6):
self.pressure_field.zero_grad()
self.velocity_field.zero_grad()
pts = torch.rand([self.args.batch_size, 2], device=coords.device) * 2.0 - 1.0
if self.optim_mode == "divergence-free":
loss = nff_ops.divergence_free_loss(pts, self.velocity_field)
elif self.optim_mode == "split":
loss = nff_ops.body_forces_loss(pts, self.velocity_field, self.timestep) +\
nff_ops.incompressibility_loss(pts, self.velocity_field, self.pressure_field,
self.rho_field, self.timestep)
elif self.optim_mode == "euler":
loss = nff_ops.euler_loss(pts, self.velocity_field,
self.pressure_field, self.rho_field, self.timestep)
loss.mean().backward()
self.optimizer.step()
with torch.no_grad():
if self.display_mode == "rgb":
if self.begin_switch:
self.rho_field.update(nff_ops.semi_lagrangian_advection(
self.image_coords, self.rho_field.vector_field, self.velocity_field, self.timestep))
return self.rho_field.vector_field
elif self.display_mode == "pressure":
return (1.0 + self.pressure_field.sample(self.image_coords)) / 2.0
elif self.display_mode == "velocity":
return (1.0 + F.normalize(self.velocity_field.sample(self.image_coords), dim=-1)) / 2.0
elif self.display_mode == "rho":
rfsample = self.rho_field.sample(self.image_coords)
return rfsample / rfsample.max()
elif self.display_mode == "divergence":
div = nff_ops.divergence(self.image_coords, self.velocity_field, method='finitediff')**2
err = div.max()
self.curr_error = err
self.max_divergence_error = max(err, self.max_divergence_error)
return div / self.max_divergence_error
#return div / err
elif self.display_mode == "euler":
loss = nff_ops.euler_loss(self.image_coords, self.velocity_field,
self.pressure_field, self.rho_field, self.timestep)
err = loss.max()
self.curr_error = err
self.max_euler_error = max(err, self.max_euler_error)
#return loss / self.max_euler_error
return loss / err
else:
return torch.zeros_like(coords)
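# --- Illustrative driver sketch (not part of the original source) ---
# Rough idea of how the methods above fit together: preconditioning runs once
# from the constructor (when --precondition is set), after which render() is
# called every frame. The coordinate grid and the `simulate` helper below are
# assumptions for the sake of the example; render() mostly samples its own
# self.image_coords internally, so coords here mainly fixes the device and the
# fallback output shape.
import torch

def simulate(sim, num_frames=100, resolution=256):
    # Pixel-centre coordinates in [-1, 1] x [-1, 1], matching the convention
    # used for the random sample points in precondition().
    ys, xs = torch.meshgrid(
        torch.linspace(-1.0, 1.0, resolution),
        torch.linspace(-1.0, 1.0, resolution),
        indexing="ij",
    )
    coords = torch.stack([xs, ys], dim=-1).reshape(-1, 2).cuda()
    frames = []
    for _ in range(num_frames):
        frames.append(sim.render(coords).detach().cpu())
    return frames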
def parse_options():
parser = argparse.ArgumentParser(description='Fluid simulation with neural networks.')
# Global arguments
global_group = parser.add_argument_group('global')
global_group.add_argument('--lr', type=float, default=1e-6,
help='Learning rate for the simulation.')
global_group.add_argument('--batch_size', type=int, default=4096,
help='Batch size for the simulation.')
global_group.add_argument('--pc_lr', type=float, default=1e-6,
help='Learning rate for the preconditioner.')
global_group.add_argument('--pc_batch_size', type=int, default=4096,
help='Batch size for the preconditioner.')
global_group.add_argument('--pc_num_batch', type=int, default=10,
help='Number of batches to use for the preconditioner.')
global_group.add_argument('--pc_epochs', type=int, default=100,
help='Number of epochs to train the preconditioner.')
"gasket",
"deviate",
"castiel",
"behaviors",
"bide",
"eckhart",
"speakerphone",
"'t--i",
"christmases",
"philharmonic",
"jittery",
"percussion",
"dominates",
"unwrap",
"clovis",
"rwanda",
"jilted",
"deterrent",
"installations",
"valkyrie",
"informers",
"haughty",
"iwo",
"briar",
"audra",
"oakley",
"overhaul",
"searing",
"multiplying",
"inactive",
"solos",
"qualifying",
"pizzeria",
"prevails",
"expressway",
"laxative",
"convene",
"thebes",
"hairbrush",
"castrate",
"molina",
"mette",
"solano",
"lupo",
"bachchan",
"shortness",
"watchtower",
"ando",
"planetarium",
"back-to-back",
"advertisements",
"mamas",
"awakens",
"idealism",
"losin",
"surveys",
"restriction",
"cosa",
"nexus",
"callaghan",
"tusks",
"copperfield",
"artistry",
"spewing",
"barabbas",
"deepa",
"aviator",
"stopwatch",
"no-no-no",
"thyme",
"shania",
"boatman",
"jeter",
"blubbering",
"effectiveness",
"devotees",
"boise",
"diets",
"humvee",
"wanking",
"grogan",
"pocahontas",
"professions",
"crandall",
"blistering",
"vandal",
"churn",
"grinds",
"mommies",
"tobey",
"motown",
"medicated",
"fritters",
"doping",
"counteract",
"conundrum",
"folsom",
"thurston",
"nervousness",
"dumpty",
"mignon",
"nuthouse",
"egotistical",
"auctioned",
"eradicated",
"mathematically",
"exceeding",
"serb",
"mackay",
"overpowered",
"glistening",
"grooves",
"swoon",
"landers",
"proactive",
"where-",
"villas",
"kristian",
"flanks",
"blogger",
"myrna",
"clique",
"synonymous",
"uncut",
"left-wing",
"calvert",
"affectionately",
"pisa",
"jargon",
"lurk",
"quarreling",
"montmartre",
"nominal",
"decreasing",
"extensively",
"forlorn",
"embers",
"juncture",
"aguilera",
"drc",
"leno",
"maquis",
"jackman",
"persecute",
"sο",
"meticulously",
"bluntly",
"vail",
"eeg",
"helluva",
"crocs",
"joachim",
"deok",
"pigtails",
"fleets",
"apocalyptic",
"rt",
"barbecued",
"digit",
"resembled",
"winky",
"conglomerate",
"tarrant",
"doppelganger",
"forgeries",
"vases",
"willies",
"shiloh",
"embargo",
"badgering",
"affirm",
"backgammon",
"joked",
"blabbing",
"slant",
"saboteur",
"exponentially",
"chieftain",
"takedown",
"waco",
"chiba",
"loudmouth",
"lacroix",
"romulans",
"filip",
"bangers",
"dicey",
"metaphysical",
"gamer",
"softened",
"domenico",
"introduces",
"aqueduct",
"forty-two",
"deathly",
"henley",
"sinan",
"emory",
"sanada",
"yasir",
"clothe",
"downwards",
"zephyr",
"actin",
"moloch",
"camino",
"codex",
"scribe",
"dishonored",
"asunder",
"torben",
"specialised",
"sag",
"anju",
"sl",
"lambda",
"paedophile",
"caressed",
"beckman",
"bei",
"clavicle",
"lids",
"homos",
"philby",
"foliage",
"favourable",
"emits",
"first-hand",
"huckleberry",
"nee",
".and",
"entrapment",
"defamation",
"scoreboard",
"massaging",
"perth",
"emcee",
"migrating",
"laziness",
"westside",
"tortilla",
"modeled",
"corrado",
"macintosh",
"worthington",
"spelt",
"tempers",
"tresses",
"coulter",
"revel",
"moulin",
"argus",
"bering",
"electronically",
"terrestrial",
"usb",
"headband",
"zander",
"cumberland",
"nosing",
"petr",
"wassup",
"practitioner",
"hitchhiker",
"gangmo",
"siva",
"bannon",
"even-",
"thru",
"wipers",
"langdon",
"trickling",
"flip-flops",
"punitive",
"sniveling",
"victimized",
"kamen",
"headlight",
"tweaked",
"boggs",
"blooded",
"painkiller",
"anatoly",
"sharpest",
"braverman",
"congenital",
"okada",
"prunes",
"'was",
"fora",
"amok",
"playhouse",
"peat",
"riverbank",
"assessed",
"simba",
"spitfire",
"ig",
"christen",
"mcgraw",
"barista",
"cochise",
"flippers",
"torrent",
"replenish",
"buckshot",
"scouring",
"lilo",
"doped",
"nairobi",
"fellini",
"arggh",
"blubber",
"nuptial",
"fantasizing",
"whitfield",
"amara",
"mikes",
"bhola",
"hattori",
"padlock",
"redwood",
"ghb",
"off-road",
"kyra",
"alerts",
"grieves",
"y-y-you",
"townhouse",
"solicitors",
"zagreb",
"stowed",
"unregistered",
"houlihan",
"mullen",
"flatline",
"sade",
"snowden",
"uniformed",
"deplorable",
"grossly",
"octus",
"fascinates",
"workman",
"sketching",
"triangulate",
"maurizio",
"winked",
"upstart",
"age-old",
"spaniel",
"achtung",
"yusuke",
"employs",
"tannoy",
"pathogen",
"reprogram",
"pp",
"entertainers",
"ani",
"again-",
"fatherly",
"barricaded",
"foreclosure",
"origami",
"muddle",
"jean-paul",
"parenthood",
"cooley",
"henriette",
"lattes",
"revenues",
"seb",
"onslaught",
"pune",
"importing",
"sharpening",
"footman",
"grueling",
"tiwari",
"mother-",
"hittin",
"saya",
"self-made",
"tripoli",
"cinco",
"archduke",
"inaugural",
"roving",
"arousing",
"mathematicians",
"lucknow",
"jadzia",
"amazement",
"yamaguchi",
"testifies",
"gerbil",
"cautiously",
"buttoned",
"'un",
"lowdown",
"heron",
"cores",
"bouts",
"reconstructed",
"smite",
"nerve-racking",
"dundee",
"tomcat",
"tourette",
"anime",
"landowner",
"gannon",
"bloodhound",
"shitter",
"blam",
"outlined",
"shoelace",
"vaporized",
"sprite",
"delphi",
"cranium",
"lettin",
"gomorrah",
"pcp",
"totaled",
"impart",
"jinxed",
"enrich",
"rewinding",
"peek-a-boo",
"aqui",
"mcgovern",
"machiko",
"asperger",
"payphone",
"sona",
"hotdog",
"pseudonym",
"wallop",
"mahi",
"wallowing",
"splendour",
"zig",
"'th",
"typist",
"marga",
"overcrowded",
"inferiority",
"farr",
"ember",
"trigger-happy",
"haruka",
"senna",
"principals",
"hand-to-hand",
"fillings",
"laxman",
"manchu",
"lapping",
"bustin",
"break-ins",
"geum",
"stresses",
"comprende",
"jackhammer",
"hunnicutt",
"worn-out",
"vasily",
"mortem",
"finalist",
"p.e.",
"farah",
"victors",
"crumpled",
"infecting",
"prevailing",
"seaquest",
"buckner",
"teahouse",
"rationing",
"kardashian",
"durst",
"nyah",
"degrade",
"breakthroughs",
"evaluated",
"pally",
"hyperventilating",
"grandfathers",
"diverting",
"boosters",
"czechs",
"kha",
"bunks",
"sashimi",
"reina",
"gabriela",
"dictators",
"emitting",
"innermost",
"adversaries",
"willi",
"bahia",
"lego",
"inciting",
"bunting",
"minstrel",
"windmills",
"sayings",
"hunky",
"shale",
"larynx",
"eskimos",
"beretta",
"decedent",
"kryptonian",
"canyons",
"journalistic",
"residing",
"impala",
"dauphin",
"sliver",
"handkerchiefs",
"'mores",
"fredrik",
"knitted",
"rajan",
"name-",
"forklift",
"butterscotch",
"burping",
"non-existent",
"mp3",
"soiree",
"hd",
"steroid",
"unturned",
"putrid",
"luk",
"urdu",
"rearview",
"deflector",
"ahsoka",
"sappy",
"nightie",
"gluten-free",
"telecast",
"peddle",
"envision",
"backwater",
"monseigneur",
"catholicism",
"copier",
"rafferty",
"hirsch",
"intertwined",
"improvisation",
"k.c.",
"standish",
"ravenous",
"jakey",
"digby",
"capitals",
"earthling",
"baze",
"suez",
"surging",
"daytona",
"appleby",
"kangaroos",
"cipri",
"raps",
"eliminates",
"leni",
"callisto",
"ofher",
"scone",
"shorted",
"whelan",
"h-hey",
"honeys",
"trembles",
"edouard",
"sir-",
"mija",
"mythological",
"bianchi",
"munster",
"matheson",
"nadu",
"undecided",
"sungkyunkwan",
"stumps",
"schooled",
"advocates",
"carols",
"outage",
"ulf",
"pigment",
"thumbprint",
"petitions",
"ivar",
"humph",
"stateside",
"dorsey",
"makeups",
"dupe",
"crackle",
"collage",
"boogeyman",
"outspoken",
"fiirst",
"redecorate",
"creole",
"haired",
"guess-",
"yamazaki",
"ninety-nine",
"kirkland",
"livers",
"reade",
"afresh",
"'even",
"transpired",
"shackled",
"chivalrous",
"fen",
"handbags",
"semifinals",
"two-year-old",
"iqbal",
"fatally",
"'ed",
"eyelid",
"tsoukalos",
"regiments",
"munro",
"turret",
"tomie",
"stimuli",
"profess",
"benevolence",
"shoot-out",
"belvedere",
"re-election",
"tybalt",
"cocksuckers",
"panicky",
"canoes",
"fillmore",
"'ai",
"trinidad",
"well-trained",
"stumpy",
"hippos",
"obstruct",
"rowe",
"aslam",
"deliberation",
"impregnated",
"knockin",
"toga",
"reasoned",
"comme",
"apologised",
"tora",
"indescribable",
"leena",
"usefulness",
"argues",
"tommaso",
"bloodstains",
"commoners",
"anxieties",
"marilla",
"crabby",
"num",
"chett",
"sathya",
"shamelessly",
"reconsidered",
"erectus",
"upheaval",
"constraints",
"earplugs",
"childlike",
"whoa-oh",
"colbert",
"cas",
"bastion",
"vinick",
"lifelike",
"hijackers",
"latham",
"denver-carrington",
"obscured",
"mosca",
"unselfish",
"alexi",
"artefacts",
"hiram",
"hud",
"'hare",
"shitheads",
"williamsburg",
"doggies",
"contessa",
"nuh",
"disband",
"'like",
"braised",
"adequately",
"dishonour",
"cancers",
"cybertron",
"uncovering",
"zebras",
"postponing",
"fraught",
"a.a.",
"skedaddle",
"stian",
"irreparable",
"amador",
"lustful",
"portsmouth",
"shopper",
"dae-woong",
"wory",
"mehra",
"seeley",
"barkeep",
"maitland",
"nikos",
"hardwood",
"hoo-hoo",
"riddler",
"follies",
"clapped",
"perfumed",
"ecology",
"unbecoming",
"enhancement",
"marston",
"tilda",
"discern",
"rios",
"za",
"charlton",
"lenox",
"aoyama",
"brigades",
"venues",
"verna",
"servitude",
"xerox",
"mains",
"a.m",
"pagans",
"poach",
"pop-up",
"receptors",
"constipation",
"shadowing",
"flamboyant",
"punters",
"wozniak",
"cravings",
"clawed",
"scot-free",
"d.e.a.",
"courted",
"zoos",
"mobiles",
"mowgli",
"militants",
"merton",
"rafters",
"pedophiles",
"kama",
"has-been",
"amphetamines",
"tadpole",
"peacekeeper",
"masterful",
"ortho",
"regulator",
"hajji",
"streaks",
"yearns",
"muchas",
"immerse",
"mischa",
"freebie",
"clenched",
"limiting",
"costco",
"get-go",
"itwas",
"fortune-teller",
"punt",
"deafening",
"court-martialed",
"deng",
"subrip",
"opt",
"byzantium",
"her.",
"zenith",
"knoll",
"exeter",
"bozos",
"uhuh",
"fess",
"minami",
"birkhoff",
"weathers",
"wonka",
"fantastically",
"ukulele",
"westerners",
"whaddya",
"abalone",
"defected",
"doozy",
"impertinence",
"sowing",
"p.m",
"boyce",
"raspy",
"yusef",
"shimmer",
"sheri",
"cray",
"punishes",
"rodger",
"rectory",
"dad-",
"ithaca",
"overhearing",
"emulate",
"dugout",
"landowners",
"gaye",
"renu",
"laptops",
"ranging",
"marburg",
"abernathy",
"disintegrated",
"porto",
"corp.",
"risa",
"django",
"routinely",
"kanji",
"granola",
"bahadur",
"barrington",
"racketeering",
"endorsed",
"jaded",
"seltzer",
"rehabilitated",
"rox",
"nagar",
"chums",
"kahlan",
"royale",
"clashes",
"wimpy",
"haggle",
"sips",
"airman",
"astra",
"00am",
"lifeboats",
"smothering",
"o-neg",
"memorandum",
"refinement",
"chariots",
"grading",
"byun",
"thirties",
"cinematographer",
"profane",
"surges",
"bride-to-be",
"underwent",
"cutbacks",
"absent-minded",
"andersson",
"demos",
"qualms",
"fornication",
"beater",
"twenty-nine",
"hitchhike",
"hatfield",
"geometric",
"gio",
"pereira",
"cornbread",
"decision-making",
"musketeer",
"larue",
"ghostbusters",
"sorbonne",
"suggestive",
"trimester",
"unseemly",
"evenin",
"goldar",
"aish",
"turing",
"doer",
"42nd",
"excluding",
"10k",
"objectivity",
"tou",
"resuscitate",
"indicative",
"keyed",
"wavy",
"coot",
"fait",
"ceasefire",
"soon-to-be",
"unexplored",
"muppets",
"daddy-o",
"tamura",
"gossips",
"aides",
"stately",
"spines",
"cornfield",
"kaye",
"conquers",
"tolerable",
"bionic",
"peroxide",
"imposition",
"kana",
"hornets",
"exorcise",
"michelin",
"cemal",
"southbound",
"beaker",
"commenting",
"smooch",
"drunkards",
"bakshi",
"whizzing",
"'onn",
"blacky",
"chumps",
"5-year-old",
"gillespie",
"juilliard",
"uplink",
"clobber",
"madan",
"theseus",
"alton",
"aaagh",
"wantto",
"wh-why",
"aditi",
"betrothal",
"goldsmith",
"gsw",
"allo",
"'can",
"wolff",
"totes",
"abstain",
"-and",
"lymphoma",
"elated",
"oldies",
"grassy",
"pessimist",
"joffrey",
"missin",
"untoward",
"puddles",
"cagney",
"sauces",
"squabble",
"nanette",
"phantoms",
"midterms",
"engraving",
"floored",
"isi",
"translators",
"mancini",
"statistic",
"léon",
"resync",
"byzantine",
"predicts",
"22-year-old",
"tutti",
"wrappers",
"wylie",
"thoroughbred",
"fairs",
"payton",
"jor-el",
"maneuvering",
"symbiote",
"chechen",
"gratification",
"recruiter",
"rentals",
"sky-high",
"mehta",
"complacent",
"undetectable",
"throwin",
"bargains",
"westerns",
"syne",
"forbidding",
"ingrate",
"cheerfully",
"sakai",
"gruff",
"impersonation",
"remover",
"corinth",
"usc",
"infancy",
"folders",
"ungodly",
"tarantula",
"uneventful",
"somali",
"opposes",
"este",
"nui",
"holi",
"tomoko",
"marr",
"fittings",
"buckled",
"cordoba",
"emporium",
"stay-at-home",
"phosphorus",
"vii",
"authorisation",
"forts",
"medea",
"smithereens",
"dewitt",
"bookkeeping",
"byers",
"ashe",
"assessing",
"nie",
"coventry",
"renovate",
"dirtier",
"'mere",
"sucky",
"biking",
"quadruple",
"wasabi",
"hines",
"halsey",
"legitimately",
"freudian",
"tromaville",
"stagger",
"overpass",
"honduras",
"schumann",
"depositions",
"cragen",
"ionger",
"flay",
"jos",
"lido",
"radium",
"swill",
"interplanetary",
"eugh",
"achmed",
"unhinged",
"bassam",
"mins",
"xie",
"substantially",
"uhura",
"hickory",
"pancreatic",
"chalmers",
"iou",
"baal",
"concubines",
"cease-fire",
"shrug",
"toma",
"arkham",
"reptilian",
"lombardo",
"hard-boiled",
"shirtless",
"laughin",
"whistled",
"navigating",
"viet",
"half-wit",
"chandni",
"allotted",
"life-form",
"impregnable",
"strewn",
"swerve",
"innovations",
"massacres",
"kudo",
"descartes",
"wordsworth",
"alastair",
"rawlings",
"auditor",
"albie",
"checkered",
"bord1.5",
"dorado",
"puffing",
"contusion",
"hounded",
"abject",
"medevac",
"economies",
"trampling",
"handouts",
"scabs",
"ailments",
"straitjacket",
"dumbledore",
"derailed",
"selfies",
"intolerance",
"haddock",
"bleached",
"digested",
"thanos",
"vanderbilt",
"unintentionally",
"razors",
"clump",
"poon",
"ilona",
"knut",
"monasteries",
"in-depth",
"stockade",
"proust",
"lucrecia",
"barret",
"convulsions",
"sunbathing",
"fixtures",
"undercooked",
"flemming",
"sharky",
"sawed",
"yugo",
"quartermaster",
"stand-in",
"khaki",
"hur",
"apathy",
"embolism",
"dignitaries",
"starscream",
"panorama",
"ricki",
"regimental",
"aint",
"deploying",
"oceanic",
"magnate",
"mongo",
"synth",
"mong",
"salma",
"sprays",
"labrador",
"seashore",
"collusion",
"holbrook",
"detainees",
"lowlifes",
"mishima",
"pollute",
"complicit",
"thirdly",
"horowitz",
"quarrelling",
"orsini",
"god-awful",
"overstepped",
"firework",
"snipe",
"retriever",
"cleanly",
"vizier",
"10-x",
"fizzy",
"unafraid",
"jinn",
"prenatal",
"waver",
"pianos",
"badgers",
"untrained",
"naka",
"eighties",
"lilah",
"sadako",
"r.j.",
"carpe",
"handlers",
"mee",
"at-",
"entails",
"gilliam",
"wildebeest",
"ulises",
"fiind",
"teaspoon",
"splutters",
"reunions",
"statesman",
"chupacabra",
"father-",
"caplan",
"incorrectly",
"iaw",
"woodpecker",
"groundwork",
"delicacies",
"'kar",
"pineapples",
"cultivating",
"commented",
"cufflinks",
"composers",
"onedin",
"dreamin",
"amenities",
"snooty",
"cosima",
"hilly",
"whaling",
"foiled",
"vectors",
"mckinney",
"thérèse",
"slowest",
"decoded",
"turbines",
"debatable",
"rhinos",
"repo",
"cheri",
"unopened",
"evo",
"decompression",
import os
from os.path import dirname, join, basename, exists
from typing import List, Tuple, Callable, Union
import itertools
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image, ImageDraw
from openslide import OpenSlide
from ._functional import (
get_thumbnail,
get_downsamples,
try_thresholds,
resize
)
from .preprocess.functional import preprocess, tissue_mask
from ._czi_reader import OpenSlideCzi
from .helpers._utils import (
remove_extension,
remove_images,
multiprocess_map
)
from ._logger import logger
__all__ = [
'Cutter',
'TMACutter'
]
class Cutter(object):
"""
Cut tiles from histological images.
This class detects tissue on the slide and cuts tiles of the desired width
from the image.
Args:
slide_path (str): Path to the slide image. All formats that are
supported by openslide can be used.
width (int): Tile width.
overlap (float, optional): Overlap between neighbouring tiles. Defaults
to 0.0.
threshold (int or float, optional): Threshold value for tissue
detection. Defaults to 1.1.
If threshold is an integer between [1, 255]:
This value will be used as a threshold for tissue detection.
Different thresholds can be easily searched with the
Cutter.try_thresholds() function.
If threshold is a float:
In this case, Otsu's binarization is used and the found
threshold is multiplied by `threshold` as Otsu isn't optimal
for histological images.
downsample (int, optional): Downsample used for the thumbnail.
When a lower downsample is used, the thumbnail-based background
detection is more accurate but slower. Good results are achieved
with downsample=16. Defaults to 16.
max_background (float, optional): Maximum amount of background allowed
for a tile. Due to the thumbnail-based background detection, tiles
with higher background percentage may pass through but rarely the
other way around. Defaults to 0.999.
create_thumbnail (bool, optional): Create a thumbnail if downsample is
not available. Defaults to False.
thumbnail_path (str, optional): Load a created thumbnail from a file.
Defaults to None.
Raises:
IOError: slide_path not found.
ValueError: downsample is not available and create_thumbnail=False.
IOError: thumbnail_path not found.
Example::
import histoprep as hp
cutter = hp.Cutter(slide_path='path/to/slide', width=512, overlap=0.2)
metadata = cutter.save('/path/to/output_dir')
"""
def __init__(
self,
slide_path: str,
width: int,
overlap: float = 0.0,
threshold: Union[int, float] = 1.1,
downsample: int = 16,
max_background: float = 0.999,
create_thumbnail: bool = False,
thumbnail_path: str = None):
super().__init__()
# Define slide reader.
if not exists(slide_path):
raise IOError(f'{slide_path} not found.')
if slide_path.endswith('czi'):
logger.warning(
"Support for czi-files is in alpha phase! If "
"you run into errors, please submit an issue to "
"https://github.com/jopo666/HistoPrep/issues"
)
self.reader = OpenSlideCzi(slide_path)
self._czi = True
else:
self.reader = OpenSlide(slide_path)
self._czi = False
# Assign basic attributes that the user can see/check.
self.slide_path = slide_path
self.slide_name = remove_extension(basename(slide_path))
self.dimensions = self.reader.dimensions
self.downsample = downsample
self.width = width
self.overlap = overlap
self.threshold = threshold
self.max_background = max_background
self.all_coordinates = self._get_all_coordinates()
# Filter coordinates.
if thumbnail_path is not None:
if not exists(thumbnail_path):
raise IOError(f'{thumbnail_path} not found.')
self.thumbnail = Image.open(thumbnail_path).convert('RGB')
else:
self.thumbnail = get_thumbnail(
slide_path=self.slide_path,
downsample=self.downsample,
create_thumbnail=create_thumbnail
)
if self.thumbnail is None:
# Downsample not available.
raise ValueError(
f'Thumbnail not available for downsample {self.downsample}. '
'Please set create_thumbnail=True or select downsample from\n'
f'{self._downsamples()}'
)
self.threshold, self._tissue_mask = tissue_mask(
image=self.thumbnail,
threshold=self.threshold,
return_threshold=True
)
self.filtered_coordinates = self._filter_coordinates()
# Annotate thumbnail
self._annotate()
def __repr__(self):
return self.__class__.__name__ + '()'
def __len__(self):
return len(self.filtered_coordinates)
def available_downsamples(self):
"""
Prints the available downsamples for the slide.
"""
print(self._downsamples())
def _downsamples(self):
string = 'Downsample Dimensions'
if self._czi:
d = {1: self.dimensions}
else:
d = get_downsamples(self.slide_path)
for item, val in d.items():
string += f'\n{str(item).ljust(12)}{val}'
return string
def summary(self):
"""Returns a summary of the cutting process."""
print(self._summary())
def _summary(self):
return (
f"{self.slide_name}"
f"\n Tile width: {self.width}"
f"\n Tile overlap: {self.overlap}"
f"\n Threshold: {self.threshold}"
f"\n Max background: {self.max_background}"
f"\n Thumbnail downsample: {self.downsample}"
f"\n Total number of tiles: {len(self.all_coordinates)}"
f"\n After background filtering: {len(self.filtered_coordinates)}"
)
def get_annotated_thumbnail(self,
max_pixels: int = 1_000_000) -> Image.Image:
"""
Returns a Pillow Image of the annotated thumbnail for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Annotated thumbnail.
"""
return resize(self._annotated_thumbnail, max_pixels)
def get_thumbnail(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
Returns a Pillow Image of the thumbnail for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Thumbnail.
"""
return resize(self.thumbnail, max_pixels)
def get_tissue_mask(self, max_pixels: int = 1_000_000) -> Image.Image:
"""
Returns a Pillow Image of the tissue mask for inspection.
Args:
max_pixels (int, optional): Downsample the image until the image
has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Tissue mask.
"""
mask = self._tissue_mask
# Flip for a nicer image
mask = 1 - mask
mask = mask/mask.max()*255
mask = Image.fromarray(mask.astype(np.uint8))
return resize(mask, max_pixels)
def _prepare_directories(self, output_dir: str) -> None:
out_dir = join(output_dir, self.slide_name)
# Save paths.
self._meta_path = join(out_dir, 'metadata.csv')
self._thumb_path = join(out_dir, f'thumbnail_{self.downsample}.jpeg')
self._annotated_path = join(out_dir, 'thumbnail_annotated.jpeg')
self._param_path = join(out_dir, 'parameters.p')
self._summary_path = join(out_dir, 'summary.txt')
self._image_dir = join(out_dir, 'tiles')
# Make dirs.
os.makedirs(out_dir, exist_ok=True)
os.makedirs(self._image_dir, exist_ok=True)
def _annotate(self) -> None:
# Draw tiles to the thumbnail.
self._annotated_thumbnail = self.thumbnail.copy()
annotated = ImageDraw.Draw(self._annotated_thumbnail)
w = h = int(self.width/self.downsample)
for (x, y), __ in self.filtered_coordinates:
x_d = round(x/self.downsample)
y_d = round(y/self.downsample)
annotated.rectangle([x_d, y_d, x_d+w, y_d+h],
outline='red', width=4)
def try_thresholds(
self,
thresholds: List[int] = [250, 240, 230,
220, 200, 190, 180, 170, 160, 150, 140],
max_pixels: int = 1_000_000
) -> Image.Image:
"""
Try out different thresholds for tissue detection.
The function prepares tissue masks with the given thresholds and combines
them into one summary image.
Args:
thresholds (List[int], optional): Thresholds to try. Defaults to
[250, 240, 230, 220, 200, 190, 180, 170, 160, 150, 140].
max_pixels (int, optional): Downsample the image until the image
has fewer than max_pixels pixels. Defaults to 1_000_000.
Returns:
Image.Image: Summary image of the tissue masks for each threshold.
"""
return try_thresholds(thumbnail=self.thumbnail, thresholds=thresholds)
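# --- Usage sketch (illustration only, not from the source; paths are placeholders) ---
# Picking a threshold: inspect the summary image produced above, then rebuild
# the Cutter with the value that separates tissue from background best.
import histoprep as hp
cutter = hp.Cutter(slide_path='/path/to/slide.tiff', width=512, overlap=0.2)
cutter.try_thresholds().save('threshold_comparison.jpeg')
cutter = hp.Cutter(slide_path='/path/to/slide.tiff', width=512, threshold=220)
cutter.summary()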
def save(
self,
output_dir: str,
overwrite: bool = False,
image_format: str = 'jpeg',
quality: int = 95,
custom_preprocess: Callable[[Image.Image], dict] = None
) -> pd.DataFrame:
"""
Save tile images and metadata.
The function saves all the detected tiles in the desired format. When
the actual image is loaded into memory, basic preprocessing metrics are
computed and added to metadata for preprocessing.
Args:
output_dir (str): Parent directory for all output.
overwrite (bool, optional): This will **remove** all saved images,
thumbnail and metadata and save the images again. Defaults to
False.
image_format (str, optional): Format can be jpeg or png. Defaults
to 'jpeg'.
quality (int, optional): For jpeg compression. Defaults to 95.
custom_preprocess (Callable[[Image.Image], dict], optional): This is
intended for users that want to define their own preprocessing
function. The function must take a Pillow image as an input and
return a dictionary of desired metrics. Defaults to None.
Raises:
ValueError: Invalid image format.
Returns:
pd.DataFrame: Metadata.
"""
allowed_formats = ['jpeg', 'png']
if image_format not in allowed_formats:
raise ValueError(
'Image format {} not allowed. Select from {}'.format(
image_format, allowed_formats
))
self._prepare_directories(output_dir)
# Check if slide has been cut before.
if exists(self._meta_path) and not overwrite:
logger.warning('Slide has already been cut!')
return pd.read_csv(self._meta_path)
elif exists(self._meta_path) and overwrite:
# Remove all previous files.
os.remove(self._annotated_path)
os.remove(self._meta_path)
remove_images(self._image_dir)
# Save both thumbnails.
self.thumbnail.save(self._thumb_path, quality=95)
self._annotated_thumbnail.save(self._annotated_path, quality=95)
# Save used parameters. NOTE: Can't remember where I would need these...
# self._save_parameters()
# Save text summary.
with open(self._summary_path, "w") as f:
f.write(self._summary())
# Collect arguments for the tile-saving function so it can be parallelized.
func_args = {
'slide_path': self.slide_path,
'slide_name': self.slide_name,
'image_dir': self._image_dir,
'width': self.width,
'threshold': self.threshold,
'image_format': image_format,
'quality': quality,
'custom_preprocess': custom_preprocess,
}
# Multiprocessing to speed things up!
metadata = multiprocess_map(
func=save_tile,
lst=self.filtered_coordinates,
func_args=func_args,
desc=self.slide_name,
)
metadata = list(filter(lambda x: x is not None, metadata))
if len(metadata) == 0:
logger.warning(f'No tiles saved from slide {self.slide_path}!')
return
# Save metadata.
self.metadata = pd.DataFrame(metadata)
self.metadata.to_csv(self._meta_path, index=False)
return self.metadata
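# --- Usage sketch (illustration only, not from the source; paths are placeholders) ---
# Saving tiles and reusing the metadata. Re-running save() without
# overwrite=True simply reloads the existing metadata.csv.
import histoprep as hp
cutter = hp.Cutter(slide_path='/path/to/slide.tiff', width=512, overlap=0.2)
metadata = cutter.save('/path/to/output_dir', image_format='jpeg', quality=95)
print(len(metadata), 'tiles saved')
metadata = cutter.save('/path/to/output_dir')  # returns the cached metadata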
def _get_all_coordinates(self):
"""Return tile coordinates over the whole slide."""
x = [0]
y = [0]
overlap_px = int(self.width*self.overlap)
while x[-1] < self.dimensions[0]:
x.append(x[-1] + self.width - overlap_px)
x = x[:-1]
while
from pyinflect import getInflection
from helper_functions import join_punctuation
# This is the function where the FIB (fill-in-the-blank) exercises for verbs are generated
def generate_fib_verb(retrieved_tuple):
fib_sentences = []
original_sentences = []
for tup in retrieved_tuple:
sentences_solution = tup[0]
tokens = tup[1]
lemmas = tup[2]
aux_index = tup[3]
verb_target_index = tup[4]
auxiliary = tup[5]
if auxiliary:
# If there is no word interfering between the auxiliary and the target verb, we will only need one blank space.
# Otherwise we will need one blank space for the auxiliary and one for the target verb.
if not len(tokens[aux_index:verb_target_index]) == 1:
tokens[aux_index] = "____"
tokens[verb_target_index] = "____________({})".format(lemmas[verb_target_index])
else:
tokens[aux_index] = ""
tokens[verb_target_index] = "____________({})".format(lemmas[verb_target_index])
# create the final sentences
fib = ' '.join([str(word) for word in tokens])
# In the case of no auxiliary, just one blank will be necessary.
else:
tokens[verb_target_index] = "____________({})".format(lemmas[verb_target_index])
fib = ' '.join([str(word) for word in tokens])
# those are the actual FIB exercises
fib_sentences.append(fib)
# those will serve as exercise solutions
original_sentences.append(sentences_solution)
return (original_sentences, fib_sentences)
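# --- Worked example (illustration only, not from the source) ---
# One retrieved tuple in the format consumed above: (solution sentence, tokens,
# lemmas, auxiliary index, target verb index, has-auxiliary flag). Because the
# auxiliary sits directly next to the verb, only one blank is produced and the
# auxiliary token is emptied (which leaves a double space in the joined string).
example = [(
    "She is playing tennis .",
    ["She", "is", "playing", "tennis", "."],
    ["she", "be", "play", "tennis", "."],
    1, 2, True,
)]
solutions, exercises = generate_fib_verb(example)
# solutions -> ["She is playing tennis ."]
# exercises -> ["She  ____________(play) tennis ."]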
# The person and number of a given verb are necessary both for the multiple-choice and the find-mistakes exercises
def retrieve_person_number(sentence_text, sentence_tokens, target_aux_index, exist_auxiliary, target_sentence_verb, spacy_model):
# obtain the verb auxiliary of the sentence in case it exists
if exist_auxiliary:
target_sentence_auxiliary = sentence_tokens[target_aux_index]
# initially we want to check that the Person number information is provided by the parser, otherwise we skip the sentence
verb_token_model = spacy_model(sentence_text)
person_number_found = False
if exist_auxiliary:
for token in verb_token_model:
# In case a verb in a target sentence has an auxiliary, the person and the number will be obtained based on it
if token.dep_ == 'aux' and token.text == target_sentence_auxiliary and token.head.text == target_sentence_verb:
target_person = token.morph.get("Person")
target_number = token.morph.get("Number")
# we need to find both the person and the number
if target_person and target_number:
person_number_found = True
# in case the person number was not provided with the auxiliary, we can also check for the subject of the verb
elif token.dep_ == 'nsubj' and token.head.text == target_sentence_verb:
target_person = token.morph.get("Person")
target_number = token.morph.get("Number")
if target_person and target_number:
person_number_found = True
if person_number_found:
break
# in case there is no auxiliary, the person number should be found by the subject of the verb
else:
for token in verb_token_model:
if token.dep_ == 'nsubj' and token.head.text == target_sentence_verb:
target_person = token.morph.get("Person")
target_number = token.morph.get("Number")
if target_person and target_number:
person_number_found = True
if person_number_found:
break
if person_number_found:
return (True, (target_person, target_number))
else:
return (False, (0,0))
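# --- Usage sketch (illustration only, not from the source) ---
# The spaCy model and sentence below are placeholders; any pipeline that fills
# in morphological features will do. Note that token.morph.get() returns lists,
# so a successful call yields something like (['3'], ['Sing']).
import spacy
nlp = spacy.load("en_core_web_sm")
found, (person, number) = retrieve_person_number(
    sentence_text="She has finished the report.",
    sentence_tokens=["She", "has", "finished", "the", "report", "."],
    target_aux_index=1,
    exist_auxiliary=True,
    target_sentence_verb="finished",
    spacy_model=nlp,
)
# found -> True, person -> ['3'], number -> ['Sing'] (if the parser provides them)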
# Formation of find-mistakes exercises
def generate_grammar_mistake(sentence_tokens, target_verb_index, target_aux_index, verb_tense, target_verb_lemma, person, number):
# For each verb tense, the transformation will be slightly different
# For the simple present, the verb is converted in a gerund verb form and an auxiliary is inserted given the right person.
if verb_tense == 'simple_present':
# change the verb form
changed_verb_form = getInflection(target_verb_lemma, tag='VBG', inflect_oov=True)
person = int(person)
number = str(number)
if person == 1 and number=='Sing':
auxiliary_change = 'am'
elif person == 2 and number=='Sing':
auxiliary_change = 'are'
elif person == 3 and number=='Sing':
auxiliary_change = 'is'
elif person == 1 and number=='Plur':
auxiliary_change = 'are'
elif person == 2 and number=='Plur':
auxiliary_change = 'are'
elif person == 3 and number=='Plur':
auxiliary_change = 'are'
sentence_tokens[target_verb_index] = changed_verb_form[0]
sentence_tokens.insert(target_verb_index, auxiliary_change)
# For the present progressive, the verb is converted in a base form and the auxiliary is deleted.
elif verb_tense == 'present_progressive':
if person == 3 and number == 'Sing':
changed_verb_form = getInflection(target_verb_lemma, tag='VBZ', inflect_oov=True)
else:
changed_verb_form = getInflection(target_verb_lemma, tag='VB', inflect_oov=True)
sentence_tokens[target_verb_index] = changed_verb_form[0]
del sentence_tokens[target_aux_index]
# For the simple past, the verb is converted into the gerund form and an auxiliary is inserted based on the person.
elif verb_tense == 'simple_past':
# change the verb form
changed_verb_form = getInflection(target_verb_lemma, tag='VBG', inflect_oov=True)
if person == 1 and number == 'Sing':
auxiliary_change = 'was'
elif person == 2 and number == 'Sing':
auxiliary_change = 'were'
elif person == 3 and number == 'Sing':
auxiliary_change = 'was'
elif person == 1 and number == 'Plur':
auxiliary_change = 'were'
elif person == 2 and number == 'Plur':
auxiliary_change = 'were'
elif person == 3 and number == 'Plur':
auxiliary_change = 'were'
sentence_tokens[target_verb_index] = changed_verb_form[0]
sentence_tokens.insert(target_verb_index, auxiliary_change)
# For the past progressive, the verb is converted into the past participle and the auxiliary is deleted.
elif verb_tense == 'past_progressive':
changed_verb_form = getInflection(target_verb_lemma, tag='VBN', inflect_oov=True)
sentence_tokens[target_verb_index] = changed_verb_form[0]
del sentence_tokens[target_aux_index]
# For the present perfect, the auxiliary is changed with the auxiliary of past perfect
elif verb_tense == 'present_perfect':
sentence_tokens[target_aux_index] = 'had'
# For the past perfect, the auxiliary is replaced with the auxiliary of the present perfect, according to the right person and number
elif verb_tense == 'past_perfect':
if person == 3 and number == 'Sing':
auxiliary_change = 'has'
else:
auxiliary_change = 'have'
sentence_tokens[target_aux_index] = auxiliary_change
error_injected_sentence = ' '.join(join_punctuation(sentence_tokens)).replace('  ', ' ')
return error_injected_sentence
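# --- Worked example (illustration only, not from the source) ---
# Injecting a mistake into a simple-present sentence: the finite verb is turned
# into its gerund and the matching form of "be" is inserted, so the learner has
# to spot the wrongly used progressive. join_punctuation() is assumed to attach
# punctuation to the preceding token without a leading space.
tokens = ["She", "plays", "tennis", "."]
wrong = generate_grammar_mistake(
    tokens,
    target_verb_index=1,
    target_aux_index=None,          # unused in the simple_present branch
    verb_tense="simple_present",
    target_verb_lemma="play",
    person=3,
    number="Sing",
)
# wrong -> "She is playing tennis."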
# Formation of multiple choice exercises
def generate_multiple_choice(sentence_tokens, target_verb_index, target_aux_index, verb_tense, target_verb_lemma, person, number):
generated_distractors = []
# For each verb tense, the transformation will be slightly different
# For the simple present, the verb is converted in a gerund verb form and an auxiliary is inserted given the right person.
if verb_tense == 'simple_present':
# change the verb form
changed_verb_form = getInflection(target_verb_lemma, tag='VBG', inflect_oov=True)
person = int(person)
number = str(number)
if person == 1 and number == 'Sing':
auxiliary_change = 'am'
elif person == 2 and number == 'Sing':
auxiliary_change = 'are'
elif person == 3 and number == 'Sing':
auxiliary_change = 'is'
elif person == 1 and number == 'Plur':
auxiliary_change = 'are'
elif person == 2 and number == 'Plur':
auxiliary_change = 'are'
elif person == 3 and number == 'Plur':
auxiliary_change = 'are'
correct_choice = sentence_tokens[target_verb_index]
wrong_choice = auxiliary_change + ' ' + changed_verb_form[0]
sentence_tokens[target_verb_index] = "____________"
fib_sentence = ' '.join(join_punctuation(sentence_tokens)).replace('  ', ' ')
generated_distractors.append([fib_sentence, correct_choice, wrong_choice])
# For the present progressive, the verb is converted in a base form and the auxiliary is deleted.
elif verb_tense == 'present_progressive':
if person == 3 and number == 'Sing':
changed_verb_form = getInflection(target_verb_lemma, tag='VBZ', inflect_oov=True)
else:
changed_verb_form = getInflection(target_verb_lemma, tag='VB', inflect_oov=True)
correct_choice = sentence_tokens[target_aux_index] + ' ' + sentence_tokens[target_verb_index]
wrong_choice = changed_verb_form[0]
sentence_tokens[target_verb_index] = "____________"
del sentence_tokens[target_aux_index]
fib_sentence = ' '.join(join_punctuation(sentence_tokens)).replace('  ', ' ')
generated_distractors.append([fib_sentence, correct_choice, wrong_choice])
# For the simple past, the verb is converted into the gerund form and an auxiliary is inserted based on the person.
elif verb_tense == 'simple_past':
changed_verb_form = getInflection(target_verb_lemma, tag='VBG', inflect_oov=True)
if person == 1 and number == 'Sing':
auxiliary_change = 'was'
elif person == 2 and number == 'Sing':
auxiliary_change = 'were'
elif person == 3 and number == 'Sing':
auxiliary_change = 'was'
elif person == 1 and number == 'Plur':
auxiliary_change = 'were'
elif person == 2 and number == 'Plur':
auxiliary_change = 'were'
elif person == 3 and number == 'Plur':
auxiliary_change = 'were'
correct_choice = sentence_tokens[target_verb_index]
wrong_choice = auxiliary_change + ' ' + changed_verb_form[0]
sentence_tokens[target_verb_index] = "____________"
fib_sentence = ' '.join(join_punctuation(sentence_tokens)).replace('  ', ' ')
generated_distractors.append([fib_sentence, correct_choice, wrong_choice])
# For the past progressive, the verb is converted into the past participle and the auxiliary is deleted.
elif verb_tense == 'past_progressive':
changed_verb_form = getInflection(target_verb_lemma, tag='VBN', inflect_oov=True)
correct_choice = sentence_tokens[target_aux_index] + ' ' + sentence_tokens[target_verb_index]
wrong_choice = changed_verb_form[0]
sentence_tokens[target_verb_index] = "____________"
del sentence_tokens[target_aux_index]
fib_sentence = ' '.join(join_punctuation(sentence_tokens)).replace('  ', ' ')
generated_distractors.append([fib_sentence, correct_choice, wrong_choice])
# For the present perfect, the auxiliary is changed with the auxiliary of past perfect
elif verb_tense == 'present_perfect':
correct_choice = sentence_tokens[target_aux_index] + ' ' + sentence_tokens[target_verb_index]
wrong_choice = 'had' + ' ' + sentence_tokens[target_verb_index]
sentence_tokens[target_verb_index] = "____________"
del sentence_tokens[target_aux_index]
fib_sentence = ' '.join(join_punctuation(sentence_tokens)).replace('  ', ' ')
generated_distractors.append([fib_sentence, correct_choice, wrong_choice])
# For the past perfect, the auxiliary is replaced with the auxiliary of the present perfect, according to the right person and number
elif verb_tense == 'past_perfect':
if person == 3 and number == 'Sing':
auxiliary_change = 'has'
else:
auxiliary_change = 'have'
correct_choice = sentence_tokens[target_aux_index] + ' ' + sentence_tokens[target_verb_index]
wrong_choice = auxiliary_change
# release/stubs/System/ComponentModel/Design/Serialization.py
# encoding: utf-8
# module System.ComponentModel.Design.Serialization calls itself Serialization
# from System, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class ComponentSerializationService(object):
""" Provides the base class for serializing a set of components or serializable objects into a serialization store. """
def CreateStore(self):
"""
CreateStore(self: ComponentSerializationService) -> SerializationStore
Creates a new System.ComponentModel.Design.Serialization.SerializationStore.
Returns: A new System.ComponentModel.Design.Serialization.SerializationStore.
"""
pass
def Deserialize(self, store, container=None):
"""
Deserialize(self: ComponentSerializationService, store: SerializationStore, container: IContainer) -> ICollection
Deserializes the given store and populates the given System.ComponentModel.IContainer with
deserialized System.ComponentModel.IComponent objects.
store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
container: The System.ComponentModel.IContainer to which System.ComponentModel.IComponent objects will be
added.
Returns: A collection of objects created according to the stored state.
Deserialize(self: ComponentSerializationService, store: SerializationStore) -> ICollection
Deserializes the given store to produce a collection of objects.
store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
Returns: A collection of objects created according to the stored state.
"""
pass
def DeserializeTo(
self, store, container, validateRecycledTypes=None, applyDefaults=None
):
"""
DeserializeTo(self: ComponentSerializationService, store: SerializationStore, container: IContainer, validateRecycledTypes: bool)
Deserializes the given System.ComponentModel.Design.Serialization.SerializationStore to the
given container, optionally validating recycled types.
store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
container: The container to which System.ComponentModel.IComponent objects will be added.
validateRecycledTypes: true to guarantee that the deserialization will only work if applied to an object of the same
type.
DeserializeTo(self: ComponentSerializationService, store: SerializationStore, container: IContainer)
Deserializes the given System.ComponentModel.Design.Serialization.SerializationStore to the
given container.
store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
container: The container to which System.ComponentModel.IComponent objects will be added.
DeserializeTo(self: ComponentSerializationService, store: SerializationStore, container: IContainer, validateRecycledTypes: bool, applyDefaults: bool)
Deserializes the given System.ComponentModel.Design.Serialization.SerializationStore to the
given container, optionally applying default property values.
store: The System.ComponentModel.Design.Serialization.SerializationStore to deserialize.
container: The container to which System.ComponentModel.IComponent objects will be added.
validateRecycledTypes: true to guarantee that the deserialization will only work if applied to an object of the same
type.
applyDefaults: true to indicate that the default property values should be applied.
"""
pass
def LoadStore(self, stream):
"""
LoadStore(self: ComponentSerializationService, stream: Stream) -> SerializationStore
Loads a System.ComponentModel.Design.Serialization.SerializationStore from a stream.
stream: The System.IO.Stream from which the store will be loaded.
Returns: A new System.ComponentModel.Design.Serialization.SerializationStore instance.
"""
pass
def Serialize(self, store, value):
"""
Serialize(self: ComponentSerializationService, store: SerializationStore, value: object)
Serializes the given object to the given
System.ComponentModel.Design.Serialization.SerializationStore.
store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of value
will be written.
value: The object to serialize.
"""
pass
def SerializeAbsolute(self, store, value):
"""
SerializeAbsolute(self: ComponentSerializationService, store: SerializationStore, value: object)
Serializes the given object, accounting for default property values.
store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of value
will be serialized.
value: The object to serialize.
"""
pass
def SerializeMember(self, store, owningObject, member):
"""
SerializeMember(self: ComponentSerializationService, store: SerializationStore, owningObject: object, member: MemberDescriptor)
Serializes the given member on the given object.
store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of member
will be serialized.
owningObject: The object to which member is attached.
member: A System.ComponentModel.MemberDescriptor specifying the member to serialize.
"""
pass
def SerializeMemberAbsolute(self, store, owningObject, member):
"""
SerializeMemberAbsolute(self: ComponentSerializationService, store: SerializationStore, owningObject: object, member: MemberDescriptor)
Serializes the given member on the given object, accounting for the default property value.
store: The System.ComponentModel.Design.Serialization.SerializationStore to which the state of member
will be serialized.
owningObject: The object to which member is attached.
member: The member to serialize.
"""
pass
class ContextStack(object):
"""
Provides a stack object that can be used by a serializer to make information available to nested serializers.
ContextStack()
"""
def Append(self, context):
"""
Append(self: ContextStack, context: object)
Appends an object to the end of the stack, rather than pushing it onto the top of the stack.
context: A context object to append to the stack.
"""
pass
def Pop(self):
"""
Pop(self: ContextStack) -> object
Removes the current object off of the stack, returning its value.
Returns: The object removed from the stack; null if no objects are on the stack.
"""
pass
def Push(self, context):
"""
Push(self: ContextStack, context: object)
Pushes, or places, the specified object onto the stack.
context: The context object to push onto the stack.
"""
pass
def __getitem__(self, *args): # cannot find CLR method
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
Current = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets the current object on the stack.
Get: Current(self: ContextStack) -> object
"""
class DefaultSerializationProviderAttribute(Attribute, _Attribute):
"""
The System.ComponentModel.Design.Serialization.DefaultSerializationProviderAttribute attribute is placed on a serializer to indicate the class to use as a default provider of that type of serializer.
DefaultSerializationProviderAttribute(providerType: Type)
DefaultSerializationProviderAttribute(providerTypeName: str)
"""
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type, providerType: Type)
__new__(cls: type, providerTypeName: str)
"""
pass
ProviderTypeName = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets the type name of the serialization provider.
Get: ProviderTypeName(self: DefaultSerializationProviderAttribute) -> str
"""
class DesignerLoader(object):
""" Provides a basic designer loader interface that can be used to implement a custom designer loader. """
def BeginLoad(self, host):
"""
BeginLoad(self: DesignerLoader, host: IDesignerLoaderHost)
Begins loading a designer.
host: The loader host through which this loader loads components.
"""
pass
def Dispose(self):
"""
Dispose(self: DesignerLoader)
Releases all resources used by the System.ComponentModel.Design.Serialization.DesignerLoader.
"""
pass
def Flush(self):
"""
Flush(self: DesignerLoader)
Writes cached changes to the location that the designer was loaded from.
"""
pass
Loading = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the loader is currently loading a document.
Get: Loading(self: DesignerLoader) -> bool
"""
class DesignerSerializerAttribute(Attribute, _Attribute):
"""
Indicates a serializer for the serialization manager to use to serialize the values of the type this attribute is applied to. This class cannot be inherited.
DesignerSerializerAttribute(serializerType: Type, baseSerializerType: Type)
DesignerSerializerAttribute(serializerTypeName: str, baseSerializerType: Type)
DesignerSerializerAttribute(serializerTypeName: str, baseSerializerTypeName: str)
"""
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, *__args):
"""
__new__(cls: type, serializerType: Type, baseSerializerType: Type)
__new__(cls: type, serializerTypeName: str, baseSerializerType: Type)
__new__(cls: type, serializerTypeName: str, baseSerializerTypeName: str)
"""
pass
SerializerBaseTypeName = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets the fully qualified type name of the serializer base type.
Get: SerializerBaseTypeName(self: DesignerSerializerAttribute) -> str
"""
SerializerTypeName = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets the fully qualified type name of the serializer.
Get: SerializerTypeName(self: DesignerSerializerAttribute) -> str
"""
TypeId = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Indicates a unique ID for this attribute type.
Get: TypeId(self: DesignerSerializerAttribute) -> object
"""
class IDesignerLoaderHost(IDesignerHost, IServiceContainer, IServiceProvider):
""" Provides an interface that can extend a designer host to support loading from a serialized state. """
def EndLoad(self, baseClassName, successful, errorCollection):
"""
EndLoad(self: IDesignerLoaderHost, baseClassName: str, successful: bool, errorCollection: ICollection)
Ends the designer loading operation.
baseClassName: The fully qualified name of the base class of the document that this designer is designing.
successful: true if the designer is successfully loaded; otherwise, false.
errorCollection: A collection containing the errors encountered during load, if any. If no errors were
encountered, pass either an empty collection or null.
"""
pass
def Reload(self):
"""
Reload(self: IDesignerLoaderHost)
Reloads the design document.
"""
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class IDesignerLoaderHost2(
IDesignerLoaderHost, IDesignerHost,
are also stripes, they are undivided stripes.
We assume the vertical bars split the page in two portions, and not more,
and that they occur more or less in the middle of the page.
If many vertical bars have been detected, we sort them by y1 ascending and then
y2 descending and then by x.
We filter the bars: if the last bar reached y = height, we only consider
bars that start lower than height.
!!! note "Fine tuning needed later on"
The vertical strokes give a rough estimate:
it is possible that they start and end in the middle of the lines beside them.
We will need histograms for the fine tuning.
Parameters
----------
stages: dict
We need access to the normalized stage to get the page size.
stretchesV: dict
Vertical line segments per x-coordinate, as delivered by `getStretches`.
Returns
-------
list
A list of stripes, specified as (x, y1, y2) values,
where the y-coordinates y1 and y2 specify the vertical extent of the stripe,
and x is the x coordinate of the dividing vertical stroke if there is one
and `None` otherwise.
"""
normalized = stages["normalized"]
(maxH, maxW) = normalized.shape[0:2]
lastHeight = 0
segments = []
for (x, ys) in stretchesV.items():
for (y1, y2, thickness) in ys:
segments.append((y1, y2, x, thickness))
stripes = []
for (y1, y2, x, thickness) in sorted(
segments, key=lambda z: (z[0], -z[1], -z[3], -z[2] or -1)
):
if y1 > lastHeight:
stripes.append((None, lastHeight, y1))
stripes.append((x, y1, y2))
lastHeight = y2
if lastHeight < maxH:
stripes.append((None, lastHeight, maxH))
return stripes
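# --- Worked example (illustration only, not from the source) ---
# Tracing the loop above for a fake 1000 x 800 page with one vertical bar of
# thickness 3 at x=400 running from y=100 to y=900. The enclosing function is
# referred to as getStripes() elsewhere in this module; the exact call shown
# in the comment is an assumption.
import numpy as np
stages = {"normalized": np.zeros((1000, 800), dtype=np.uint8)}
stretchesV = {400: [(100, 900, 3)]}
# getStripes(stages, stretchesV) would return:
#   [(None, 0, 100),     # undivided stripe above the bar
#    (400, 100, 900),    # stripe split by the bar at x=400
#    (None, 900, 1000)]  # undivided stripe below the bar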
def getBlocks(C, info, stages, pageH, stripes, stretchesH, batch):
"""Fine-tune stripes into blocks.
We enlarge the stripes vertically by roughly a line height
and call `adjustVertical` to get precise vertical demarcations
for the blocks at both sides of the stripe if there is one or else
for the undivided stripe.
The idea is:
If a stripe has a vertical bar, we slightly extend the boxes left and right
so that the top and bottom lines next to the bar are completely included.
If a stripe has no vertical bar, we shrink the box
so that partial top and bottom lines are delegated to the boxes above
and below.
We only shrink if the box is close to the boxes above or below.
We do not grow boxes across significant horizontal strokes.
We write the box layout onto the `layout` layer.
Parameters
----------
C: object
Configuration settings
stages: dict
We need access to several intermediate results.
pageH: int
The height of a full page in pixels (the image might be a fraction of a page)
stripes: list
The preliminary stripe division of the page, as delivered by
`getStripes`.
stretchesH: list
The horizontal stretches across which we do not shrink of enlarge
batch: boolean
Whether we run in batch mode.
Returns
-------
dict
Blocks keyed by stripe number and block specification
(one of `""`, `"l"`, `"r"`).
The values form dicts themselves, with in particular the bounding box
information under key `box` specified as four numbers:
left, top, right, bottom.
The dict is ordered.
"""
marginX = C.blockMarginX
blockColor = C.blockRGB
letterColor = C.letterRGB
blurred = stages["blurred"]
normalized = stages["normalized"]
(maxH, maxW) = normalized.shape[0:2]
leeHeight = int(pageH // 20)
blocks = {}
upperHStretch = min(stretchesH) if stretchesH else 0
lowerHStretch = max(stretchesH) if stretchesH else maxH
if not batch:
layout = stages["layout"]
for (stripe, (x, yMin, yMax)) in enumerate(stripes):
yMinLee = max((0, yMin - leeHeight))
yMaxLee = min((maxH, yMax + leeHeight))
if x is None:
(theYMin, theYMax) = adjustVertical(
C, info, blurred, pageH, 0, maxW, yMin, yMinLee, yMax, yMaxLee, False
)
blocks[(stripe, "")] = dict(
box=(marginX, theYMin, maxW - marginX, theYMax),
sep=x,
)
if not batch:
cv2.rectangle(
layout,
(marginX, theYMin),
(maxW - marginX, theYMax),
blockColor,
4,
)
addBlockName(layout, theYMin, 0, maxW, marginX, letterColor, stripe, "")
else:
yMinLeeBound = (
yMinLee
if upperHStretch == 0 or upperHStretch > yMin
else max((yMinLee, max(y for y in stretchesH if y <= yMin)))
)
yMaxLeeBound = (
yMaxLee
if lowerHStretch == maxH or lowerHStretch < yMax
else min((yMaxLee, min(y for y in stretchesH if y >= yMax)))
)
(theYMinL, theYMaxL) = adjustVertical(
C,
info,
blurred,
pageH,
0,
x,
yMin,
yMinLeeBound,
yMax,
yMaxLeeBound,
True,
)
(theYMinR, theYMaxR) = adjustVertical(
C,
info,
blurred,
pageH,
x,
maxW,
yMin,
yMinLeeBound,
yMax,
yMaxLeeBound,
True,
)
blocks[(stripe, "l")] = dict(
box=(marginX, theYMinL, x - marginX, theYMaxL), sep=x
)
blocks[(stripe, "r")] = dict(
box=(x + marginX, theYMinR, maxW - marginX, theYMaxR), sep=x
)
if not batch:
cv2.rectangle(
layout,
(marginX, theYMinL),
(x - marginX, theYMaxL),
blockColor,
4,
)
addBlockName(layout, theYMinL, 0, x, marginX, letterColor, stripe, "l")
cv2.rectangle(
layout,
(x + marginX, theYMinR),
(maxW - marginX, theYMaxR),
blockColor,
4,
)
addBlockName(
layout, theYMinR, x, maxW, marginX, letterColor, stripe, "r"
)
return collections.OrderedDict(sorted(blocks.items()))
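# --- Shape of the result (illustration only, not from the source) ---
# For a page with one undivided stripe followed by one stripe divided at
# x=400, the ordered dict returned above would look roughly like this
# (pixel values are made up):
import collections
example_blocks = collections.OrderedDict([
    ((0, ""),  dict(box=(20, 0, 780, 310), sep=None)),
    ((1, "l"), dict(box=(20, 310, 380, 990), sep=400)),
    ((1, "r"), dict(box=(420, 310, 780, 990), sep=400)),
])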
def applyHRules(C, stages, stretchesH, stripes, blocks, batch, boxed):
"""Trims regions above horizontal top lines and below bottom lines.
Inspect the horizontal strokes and specify which ones are
top separators and which ones are bottom separators.
First we map each horizontal stretch to one of the page stripes.
If a stretch occurs between stripes, we map it to the stripe above.
A horizontal stroke is a top separator if
* it is mapped to the first stripe **and**
* it is situated in the top fragment of the page.
We mark the discarded material on the layout page by overlaying
it with gray.
Parameters
----------
C: object
Configuration settings
stages: dict
We need access to several intermediate results.
stretchesH: dict
Horizontal line segments per y-coordinate, as delivered by `getStretches`.
stripes: list
The preliminary stripe division of the page, as delivered by
`getStripes`.
blocks: dict
The blocks as delivered by `getBlocks`.
boxed: boolean
Whether we run in boxed mode (generate boxes around wiped marks).
Returns
-------
None
The blocks dict will be updated: each block value gets a new key `inner`
with the bounding box info after stripping the top and bottom material.
"""
mColor = C.marginRGB
whit = C.whiteGRS
white = C.whiteRGB
letterColor = C.letterRGB
normalized = stages["normalized"]
demargined = normalized.copy()
stages["demargined"] = demargined
if not batch:
layout = stages["layout"]
if not batch or boxed:
normalizedC = stages["normalizedC"]
demarginedC = normalizedC.copy()
stages["demarginedC"] = demarginedC
(maxH, maxW) = normalized.shape[0:2]
topCriterion = maxH / 6
topXCriterion = maxH / 4
for ((stripe, block), data) in blocks.items():
(bL, bT, bR, bB) = data["box"]
x = data["sep"]
top = None
bottom = None
for (y, xs) in sorted(stretchesH.items()):
if y < bT:
continue
if bB < y:
break
for (x1, x2, thickness) in xs:
if x is not None:
if block == "l" and x1 >= x:
continue
if block == "r" and x2 <= x:
continue
isTop = stripe == 0 and (
len(stripes) == 1
and y < topCriterion
or len(stripes) > 1
and y < topXCriterion
)
if isTop:
top = y + 2 * thickness + 2
else:
if bottom is None:
bottom = y - 2 * thickness - 2
if not batch:
addHStroke(
layout,
isTop,
stripe,
block,
thickness,
y,
x1,
x2,
letterColor,
)
top = bT if top is None else top
bottom = bB if bottom is None else bottom
left = bL + 2
right = bR - 2
data["inner"] = (left, top, right, bottom)
if top != bT:
if not batch:
overlay(layout, left, bT + 2, right, top, white, mColor)
cv2.rectangle(demargined, (left, bT), (right, top), whit, -1)
if not batch or boxed:
overlay(demarginedC, left, bT + 2, right, top, white, mColor)
if bottom != bB:
if not batch:
overlay(layout, left, bottom, right, bB - 2, white, mColor)
cv2.rectangle(demargined, (left, bottom), (right, bB), whit, -1)
if not batch or boxed:
overlay(demarginedC, left, bottom, right, bB - 2, white, mColor)
def grayInterBlocks(C, stages, blocks):
"""Overlay the space between blocks with gray.
Remove also the empty blocks from the block
# Repository: SaqibMamoon/GSDT (GitHub stars: 1-10)
import glob
import os
import os.path as osp
import random
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.ops import nms
import motmetrics as mm
# import maskrcnn_benchmark.layers.nms as nms
# Set printoptions
torch.set_printoptions(linewidth=1320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
def mkdir_if_missing(d):
if not osp.exists(d):
os.makedirs(d)
def float3(x): # format floats to 3 decimals
return float(format(x, '.3f'))
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_classes(path):
"""
Loads class labels at 'path'
"""
    with open(path, 'r') as fp:
        names = fp.read().split('\n')
    return list(filter(None, names))  # filter removes empty strings (such as last line)
def model_info(model): # Plots a line-by-line description of a PyTorch model
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
print('\n%5s %50s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %50s %9s %12g %20s %12.3g %12.3g' % (
i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
print('Model Summary: %g layers, %g parameters, %g gradients\n' % (i + 1, n_p, n_g))
def plot_one_box(x, img, color=None, label=None, line_thickness=None): # Plots one bounding box on image img
tl = line_thickness or round(0.0004 * max(img.shape[0:2])) + 1 # line thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.03)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.03)
torch.nn.init.constant_(m.bias.data, 0.0)
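# Example (a hypothetical usage sketch): nn.Module.apply visits every submodule, so the
# initialiser above can be applied to a whole network in one call.
#   import torch.nn as nn
#   net = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
#   net.apply(weights_init_normal)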
def xyxy2xywh(x):
# Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2
y[:, 1] = (x[:, 1] + x[:, 3]) / 2
y[:, 2] = x[:, 2] - x[:, 0]
y[:, 3] = x[:, 3] - x[:, 1]
return y
def xywh2xyxy(x):
# Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
y = torch.zeros(x.shape) if x.dtype is torch.float32 else np.zeros(x.shape)
y[:, 0] = (x[:, 0] - x[:, 2] / 2)
y[:, 1] = (x[:, 1] - x[:, 3] / 2)
y[:, 2] = (x[:, 0] + x[:, 2] / 2)
y[:, 3] = (x[:, 1] + x[:, 3] / 2)
return y
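# Example (illustrative values): round-trip between the two box formats defined above.
#   boxes_xyxy = torch.tensor([[10., 20., 50., 60.]])
#   boxes_xywh = xyxy2xywh(boxes_xyxy)  # -> [[30., 40., 40., 40.]] (centre x, centre y, w, h)
#   recovered = xywh2xyxy(boxes_xywh)   # recovers the original corner coordinates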
def scale_coords(img_size, coords, img0_shape):
# Rescale x1, y1, x2, y2 from 416 to image size
gain_w = float(img_size[0]) / img0_shape[1] # gain = old / new
gain_h = float(img_size[1]) / img0_shape[0]
gain = min(gain_w, gain_h)
pad_x = (img_size[0] - img0_shape[1] * gain) / 2 # width padding
pad_y = (img_size[1] - img0_shape[0] * gain) / 2 # height padding
coords[:, [0, 2]] -= pad_x
coords[:, [1, 3]] -= pad_y
coords[:, 0:4] /= gain
coords[:, :4] = torch.clamp(coords[:, :4], min=0)
return coords
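# Example (illustrative values): map detections from a 1088x608 network input back to a
# 1920x1080 frame; img_size is the network (w, h) and img0_shape is the original (h, w, c).
#   dets = torch.tensor([[100., 50., 200., 150.]])
#   dets = scale_coords((1088, 608), dets, (1080, 1920, 3))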
def ap_per_class(tp, conf, pred_cls, target_cls):
""" Compute the average precision, given the recall and precision curves.
Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (list).
conf: Objectness value from 0-1 (list).
pred_cls: Predicted object classes (list).
target_cls: True object classes (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# lists/pytorch to numpy
tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(pred_cls), np.array(target_cls)
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
# Create Precision-Recall curve and compute AP for each class
ap, p, r = [], [], []
for c in unique_classes:
i = pred_cls == c
n_gt = sum(target_cls == c) # Number of ground truth objects
n_p = sum(i) # Number of predicted objects
if (n_p == 0) and (n_gt == 0):
continue
elif (n_p == 0) or (n_gt == 0):
ap.append(0)
r.append(0)
p.append(0)
else:
# Accumulate FPs and TPs
fpc = np.cumsum(1 - tp[i])
tpc = np.cumsum(tp[i])
# Recall
recall_curve = tpc / (n_gt + 1e-16)
r.append(tpc[-1] / (n_gt + 1e-16))
# Precision
precision_curve = tpc / (tpc + fpc)
p.append(tpc[-1] / (tpc[-1] + fpc[-1]))
# AP from recall-precision curve
ap.append(compute_ap(recall_curve, precision_curve))
return np.array(ap), unique_classes.astype('int32'), np.array(r), np.array(p)
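# Example (illustrative values): two predictions for class 0, one true positive and one false
# positive, against a single ground-truth object.
#   ap, classes, r, p = ap_per_class(tp=[1, 0], conf=[0.9, 0.8], pred_cls=[0, 0], target_cls=[0])
#   # ap ~ [1.0], r ~ [1.0], p ~ [0.5]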
def compute_ap(recall, precision):
""" Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
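# Example (illustrative values): a detector that reaches recall 1.0 while precision drops to 0.5.
#   compute_ap(recall=[0.0, 0.5, 1.0], precision=[1.0, 0.75, 0.5])  # -> 0.625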
def bbox_iou(box1, box2, x1y1x2y2=False):
"""
Returns the IoU of two bounding boxes
"""
N, M = len(box1), len(box2)
if x1y1x2y2:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
else:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
# get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1.unsqueeze(1), b2_x1)
inter_rect_y1 = torch.max(b1_y1.unsqueeze(1), b2_y1)
inter_rect_x2 = torch.min(b1_x2.unsqueeze(1), b2_x2)
inter_rect_y2 = torch.min(b1_y2.unsqueeze(1), b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1, 0) * torch.clamp(inter_rect_y2 - inter_rect_y1, 0)
# Union Area
    b1_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1)).view(-1,1).expand(N,M)
b2_area = ((b2_x2 - b2_x1) * (b2_y2 - b2_y1)).view(1,-1).expand(N,M)
return inter_area / (b1_area + b2_area - inter_area + 1e-16)
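# Example (illustrative values): IoU of two corner-format boxes that overlap in a 5x5 region.
#   a = torch.tensor([[0., 0., 10., 10.]])
#   b = torch.tensor([[5., 5., 15., 15.]])
#   bbox_iou(a, b, x1y1x2y2=True)  # -> tensor([[0.1429]]) i.e. 25 / (100 + 100 - 25)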
def build_targets_max(target, anchor_wh, nA, nC, nGh, nGw):
"""
returns nT, nCorrect, tx, ty, tw, th, tconf, tcls
"""
nB = len(target) # number of images in batch
txy = torch.zeros(nB, nA, nGh, nGw, 2).cuda() # batch size, anchors, grid size
twh = torch.zeros(nB, nA, nGh, nGw, 2).cuda()
tconf = torch.LongTensor(nB, nA, nGh, nGw).fill_(0).cuda()
tcls = torch.ByteTensor(nB, nA, nGh, nGw, nC).fill_(0).cuda() # nC = number of classes
tid = torch.LongTensor(nB, nA, nGh, nGw, 1).fill_(-1).cuda()
for b in range(nB):
t = target[b]
t_id = t[:, 1].clone().long().cuda()
t = t[:,[0,2,3,4,5]]
nTb = len(t) # number of targets
if nTb == 0:
continue
#gxy, gwh = t[:, 1:3] * nG, t[:, 3:5] * nG
gxy, gwh = t[: , 1:3].clone() , t[:, 3:5].clone()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
gi = torch.clamp(gxy[:, 0], min=0, max=nGw -1).long()
gj = torch.clamp(gxy[:, 1], min=0, max=nGh -1).long()
# Get grid box indices and prevent overflows (i.e. 13.01 on 13 anchors)
#gi, gj = torch.clamp(gxy.long(), min=0, max=nG - 1).t()
#gi, gj = gxy.long().t()
# iou of targets-anchors (using wh only)
box1 = gwh
box2 = anchor_wh.unsqueeze(1)
inter_area = torch.min(box1, box2).prod(2)
iou = inter_area / (box1.prod(1) + box2.prod(2) - inter_area + 1e-16)
# Select best iou_pred and anchor
iou_best, a = iou.max(0) # best anchor [0-2] for each target
# Select best unique target-anchor combinations
if nTb > | |
= self._listeners.get(Signals.pre_delete, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(
listener(
self.__class__,
self,
using_db,
)
)
await asyncio.gather(*listeners)
async def _post_delete(
self,
using_db: Optional[BaseDBAsyncClient] = None,
) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.post_delete, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(
listener(
self.__class__,
self,
using_db,
)
)
await asyncio.gather(*listeners)
async def _pre_save(
self,
using_db: Optional[BaseDBAsyncClient] = None,
update_fields: Optional[Iterable[str]] = None,
) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.pre_save, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(listener(self.__class__, self, using_db, update_fields))
await asyncio.gather(*listeners)
async def _post_save(
self,
using_db: Optional[BaseDBAsyncClient] = None,
created: bool = False,
update_fields: Optional[Iterable[str]] = None,
) -> None:
listeners = []
cls_listeners = self._listeners.get(Signals.post_save, {}).get(self.__class__, [])
for listener in cls_listeners:
listeners.append(listener(self.__class__, self, created, using_db, update_fields))
await asyncio.gather(*listeners)
async def save(
self,
using_db: Optional[BaseDBAsyncClient] = None,
update_fields: Optional[Iterable[str]] = None,
force_create: bool = False,
force_update: bool = False,
) -> None:
"""
Creates/Updates the current model object.
:param update_fields: If provided, it should be a tuple/list of fields by name.
This is the subset of fields that should be updated.
If the object needs to be created ``update_fields`` will be ignored.
:param using_db: Specific DB connection to use instead of default bound
:param force_create: Forces creation of the record
:param force_update: Forces updating of the record
:raises IncompleteInstanceError: If the model is partial and the fields are not available for persistence.
:raises IntegrityError: If the model can't be created or updated (specifically if force_create or force_update has been set)
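        Example (a minimal sketch; the ``user`` instance and its ``name`` field are illustrative):
        .. code-block:: python3
            user.name = "New name"
            await user.save(update_fields=["name"])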
"""
db = using_db or self._choose_db(True)
executor = db.executor_class(model=self.__class__, db=db)
if self._partial:
if update_fields:
for field in update_fields:
if not hasattr(self, self._meta.pk_attr):
raise IncompleteInstanceError(
f"{self.__class__.__name__} is a partial model without primary key fetchd. Partial update not available"
)
if not hasattr(self, field):
raise IncompleteInstanceError(
f"{self.__class__.__name__} is a partial model, field '{field}' is not available"
)
else:
raise IncompleteInstanceError(
f"{self.__class__.__name__} is a partial model, can only be saved with the relevant update_field provided"
)
await self._pre_save(db, update_fields)
if force_create:
await executor.execute_insert(self)
created = True
elif force_update:
rows = await executor.execute_update(self, update_fields)
if rows == 0:
raise IntegrityError(f"Can't update object that doesn't exist. PK: {self.pk}")
created = False
else:
if self._saved_in_db or update_fields:
if self.pk is None:
await executor.execute_insert(self)
created = True
else:
await executor.execute_update(self, update_fields)
created = False
else:
# TODO: Do a merge/upsert operation here instead. Let the executor determine an optimal strategy for each DB engine.
await executor.execute_insert(self)
created = True
self._saved_in_db = True
await self._post_save(db, created, update_fields)
async def delete(self, using_db: Optional[BaseDBAsyncClient] = None) -> None:
"""
Deletes the current model object.
:param using_db: Specific DB connection to use instead of default bound
:raises OperationalError: If object has never been persisted.
"""
db = using_db or self._choose_db(True)
if not self._saved_in_db:
raise OperationalError("Can't delete unpersisted record")
await self._pre_delete(db)
await db.executor_class(model=self.__class__, db=db).execute_delete(self)
await self._post_delete(db)
async def fetch_related(self, *args: Any, using_db: Optional[BaseDBAsyncClient] = None) -> None:
"""
Fetch related fields.
.. code-block:: python3
User.fetch_related("emails", "manager")
:param args: The related fields that should be fetched.
:param using_db: Specific DB connection to use instead of default bound
"""
db = using_db or self._choose_db()
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
async def refresh_from_db(
self,
fields: Optional[Iterable[str]] = None,
using_db: Optional[BaseDBAsyncClient] = None,
) -> None:
"""
Refresh latest data from db. When this method is called without arguments
all db fields of the model are updated to the values currently present in the database.
.. code-block:: python3
user.refresh_from_db(fields=['name'])
:param fields: The special fields that to be refreshed.
:param using_db: Specific DB connection to use instead of default bound.
:raises OperationalError: If object has never been persisted.
"""
if not self._saved_in_db:
raise OperationalError("Can't refresh unpersisted record")
db = using_db or self._choose_db()
qs = QuerySet(self.__class__).using_db(db).only(*(fields or []))
obj = await qs.get(pk=self.pk)
for field in fields or self._meta.db_fields:
setattr(self, field, getattr(obj, field, None))
@classmethod
def _choose_db(cls, for_write: bool = False):
"""
Return the connection that will be used if this query is executed now.
:param for_write: Whether this query for write.
:return: BaseDBAsyncClient:
"""
if for_write:
db = router.db_for_write(cls)
else:
db = router.db_for_read(cls)
return db or cls._meta.db
@classmethod
async def get_or_create(
cls: Type[MODEL],
defaults: Optional[dict] = None,
using_db: Optional[BaseDBAsyncClient] = None,
**kwargs: Any,
) -> Tuple[MODEL, bool]:
"""
Fetches the object if exists (filtering on the provided parameters),
else creates an instance with any unspecified parameters as default values.
:param defaults: Default values to be added to a created instance if it can't be fetched.
:param using_db: Specific DB connection to use instead of default bound
:param kwargs: Query parameters.
:raises IntegrityError: If create failed
:raises TransactionManagementError: If transaction error
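        Example (a minimal sketch; the ``User`` model and its fields are illustrative):
        .. code-block:: python3
            user, created = await User.get_or_create(
                username="john", defaults={"email": "john@example.com"}
            )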
"""
if not defaults:
defaults = {}
db = using_db or cls._choose_db(True)
async with in_transaction(connection_name=db.connection_name) as connection:
try:
return await cls.filter(**kwargs).using_db(connection).get(), False
except DoesNotExist:
try:
return await cls.create(using_db=connection, **defaults, **kwargs), True
except (IntegrityError, TransactionManagementError):
return await cls.filter(**kwargs).using_db(connection).get(), False
@classmethod
def select_for_update(
cls, nowait: bool = False, skip_locked: bool = False, of: Tuple[str, ...] = ()
) -> QuerySet[MODEL]:
"""
Make QuerySet select for update.
Returns a queryset that will lock rows until the end of the transaction,
generating a SELECT ... FOR UPDATE SQL statement on supported databases.
"""
return cls._meta.manager.get_queryset().select_for_update(nowait, skip_locked, of)
@classmethod
async def update_or_create(
cls: Type[MODEL],
defaults: Optional[dict] = None,
using_db: Optional[BaseDBAsyncClient] = None,
**kwargs: Any,
) -> Tuple[MODEL, bool]:
"""
A convenience method for updating an object with the given kwargs, creating a new one if necessary.
:param defaults: Default values used to update the object.
:param using_db: Specific DB connection to use instead of default bound
:param kwargs: Query parameters.
"""
if not defaults:
defaults = {}
db = using_db or cls._choose_db(True)
async with in_transaction(connection_name=db.connection_name) as connection:
instance = await cls.select_for_update().using_db(connection).get_or_none(**kwargs)
if instance:
await instance.update_from_dict(defaults).save(using_db=connection) # type:ignore
return instance, False
return await cls.get_or_create(defaults, db, **kwargs)
@classmethod
async def create(cls: Type[MODEL], **kwargs: Any) -> MODEL:
"""
Create a record in the DB and returns the object.
.. code-block:: python3
user = await User.create(name="...", email="...")
Equivalent to:
.. code-block:: python3
user = User(name="...", email="...")
await user.save()
:param kwargs: Model parameters.
"""
instance = cls(**kwargs)
instance._saved_in_db = False
db = kwargs.get("using_db") or cls._choose_db(True)
await instance.save(using_db=db, force_create=True)
return instance
@classmethod
def bulk_update(
cls: Type[MODEL],
objects: Iterable[MODEL],
fields: Iterable[str],
batch_size: Optional[int] = None,
) -> "BulkUpdateQuery":
"""
Update the given fields in each of the given objects in the database.
This method efficiently updates the given fields on the provided model instances, generally with one query.
.. code-block:: python3
users = [
await User.create(name="...", email="..."),
await User.create(name="...", email="...")
]
users[0].name = 'name1'
users[1].name = 'name2'
await User.bulk_update(users, fields=['name'])
:param objects: List of objects to bulk create
:param fields: The fields to update
:param batch_size: How many objects are created in a single query
"""
return cls._meta.manager.get_queryset().bulk_update(objects, fields, batch_size)
@classmethod
async def in_bulk(
cls: Type[MODEL], id_list: Iterable[Union[str, int]], field_name: str = "pk"
) -> Dict[str, MODEL]:
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
:param id_list: A list of field values
:param field_name: Must be a unique field
"""
return await cls._meta.manager.get_queryset().in_bulk(id_list, field_name)
@classmethod
async def bulk_create(
cls: Type[MODEL],
objects: Iterable[MODEL],
batch_size: Optional[int] = None,
using_db: Optional[BaseDBAsyncClient] = None,
) -> None:
"""
Bulk insert operation:
.. note::
The bulk insert operation will do the minimum to ensure that the object
created in the DB has all the defaults and generated fields set,
            but the returned objects may be incomplete references in Python.
e.g. ``IntField`` primary keys will not be populated.
            This is recommended only for throw-away inserts where you want to ensure optimal
insert performance.
.. code-block:: python3
User.bulk_create([
User(name="...", email="..."),
User(name="...", email="...")
])
:param objects: List of objects to bulk create
:param batch_size: How many objects are created in a single query
:param using_db: Specific DB connection to use instead of default bound
"""
db = using_db or cls._choose_db(True)
await db.executor_class(model=cls, db=db).execute_bulk_insert(objects, batch_size)
| |
Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the local path of a file which will be used
        as the object's content. Conflicts with `content` and `copy_from`.
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@pulumi.input_type
class _ContainerObjectState:
def __init__(__self__, *,
container_name: Optional[pulumi.Input[str]] = None,
content: Optional[pulumi.Input[str]] = None,
content_disposition: Optional[pulumi.Input[str]] = None,
content_encoding: Optional[pulumi.Input[str]] = None,
content_length: Optional[pulumi.Input[int]] = None,
content_type: Optional[pulumi.Input[str]] = None,
copy_from: Optional[pulumi.Input[str]] = None,
date: Optional[pulumi.Input[str]] = None,
delete_after: Optional[pulumi.Input[int]] = None,
delete_at: Optional[pulumi.Input[str]] = None,
detect_content_type: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
last_modified: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[Mapping[str, Any]]] = None,
name: Optional[pulumi.Input[str]] = None,
object_manifest: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
trans_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ContainerObject resources.
:param pulumi.Input[str] container_name: A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
:param pulumi.Input[str] content: A string representing the content of the object. Conflicts with
`source` and `copy_from`.
:param pulumi.Input[str] content_disposition: A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
:param pulumi.Input[str] content_encoding: A string representing the value of the Content-Encoding
metadata.
:param pulumi.Input[int] content_length: If the operation succeeds, this value is zero (0) or the
length of informational or error text in the response body.
:param pulumi.Input[str] content_type: A string which sets the MIME type for the object.
:param pulumi.Input[str] copy_from: A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
:param pulumi.Input[str] date: The date and time the system responded to the request, using the preferred
format of RFC 7231 as shown in this example Thu, 16 Jun 2016 15:10:38 GMT. The
time is always in UTC.
:param pulumi.Input[int] delete_after: An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
        :param pulumi.Input[str] delete_at: A string representing the date when the system removes the object.
               For example, "2015-08-26" is equivalent to Wed, 26 Aug 2015 00:00:00 GMT.
:param pulumi.Input[bool] detect_content_type: If set to true, Object Storage guesses the content
type based on the file extension and ignores the value sent in the Content-Type
header, if present.
:param pulumi.Input[str] etag: Used to trigger updates. The only meaningful value is ${md5(file("path/to/file"))}.
:param pulumi.Input[str] last_modified: The date and time when the object was last modified. The date and time
stamp format is ISO 8601:
CCYY-MM-DDThh:mm:ss±hh:mm
For example, 2015-08-27T09:49:58-05:00.
The ±hh:mm value, if included, is the time zone as an offset from UTC. In the previous
example, the offset value is -05:00.
:param pulumi.Input[str] name: A unique name for the object.
:param pulumi.Input[str] object_manifest: A string set to specify that this is a dynamic large
object manifest object. The value is the container and object name prefix of the
segment objects in the form container/prefix. You must UTF-8-encode and then
URL-encode the names of the container and prefix before you include them in this
header.
:param pulumi.Input[str] region: The region in which to create the container. If
omitted, the `region` argument of the provider is used. Changing this
creates a new container.
:param pulumi.Input[str] source: A string representing the local path of a file which will be used
               as the object's content. Conflicts with `content` and `copy_from`.
:param pulumi.Input[str] trans_id: A unique transaction ID for this request. Your service provider might
need this value if you report a problem.
"""
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if content is not None:
pulumi.set(__self__, "content", content)
if content_disposition is not None:
pulumi.set(__self__, "content_disposition", content_disposition)
if content_encoding is not None:
pulumi.set(__self__, "content_encoding", content_encoding)
if content_length is not None:
pulumi.set(__self__, "content_length", content_length)
if content_type is not None:
pulumi.set(__self__, "content_type", content_type)
if copy_from is not None:
pulumi.set(__self__, "copy_from", copy_from)
if date is not None:
pulumi.set(__self__, "date", date)
if delete_after is not None:
pulumi.set(__self__, "delete_after", delete_after)
if delete_at is not None:
pulumi.set(__self__, "delete_at", delete_at)
if detect_content_type is not None:
pulumi.set(__self__, "detect_content_type", detect_content_type)
if etag is not None:
pulumi.set(__self__, "etag", etag)
if last_modified is not None:
pulumi.set(__self__, "last_modified", last_modified)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if name is not None:
pulumi.set(__self__, "name", name)
if object_manifest is not None:
pulumi.set(__self__, "object_manifest", object_manifest)
if region is not None:
pulumi.set(__self__, "region", region)
if source is not None:
pulumi.set(__self__, "source", source)
if trans_id is not None:
pulumi.set(__self__, "trans_id", trans_id)
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[pulumi.Input[str]]:
"""
A unique (within an account) name for the container.
The container name must be from 1 to 256 characters long and can start
with any character and contain any pattern. Character set must be UTF-8.
The container name cannot contain a slash (/) character because this
character delimits the container and object name. For example, the path
/v1/account/www/pages specifies the www container, not the www/pages container.
"""
return pulumi.get(self, "container_name")
@container_name.setter
def container_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "container_name", value)
@property
@pulumi.getter
def content(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the content of the object. Conflicts with
`source` and `copy_from`.
"""
return pulumi.get(self, "content")
@content.setter
def content(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content", value)
@property
@pulumi.getter(name="contentDisposition")
def content_disposition(self) -> Optional[pulumi.Input[str]]:
"""
A string which specifies the override behavior for
the browser. For example, this header might specify that the browser use a download
program to save this file rather than show the file, which is the default.
"""
return pulumi.get(self, "content_disposition")
@content_disposition.setter
def content_disposition(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_disposition", value)
@property
@pulumi.getter(name="contentEncoding")
def content_encoding(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the value of the Content-Encoding
metadata.
"""
return pulumi.get(self, "content_encoding")
@content_encoding.setter
def content_encoding(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_encoding", value)
@property
@pulumi.getter(name="contentLength")
def content_length(self) -> Optional[pulumi.Input[int]]:
"""
If the operation succeeds, this value is zero (0) or the
length of informational or error text in the response body.
"""
return pulumi.get(self, "content_length")
@content_length.setter
def content_length(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "content_length", value)
@property
@pulumi.getter(name="contentType")
def content_type(self) -> Optional[pulumi.Input[str]]:
"""
A string which sets the MIME type for the object.
"""
return pulumi.get(self, "content_type")
@content_type.setter
def content_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "content_type", value)
@property
@pulumi.getter(name="copyFrom")
def copy_from(self) -> Optional[pulumi.Input[str]]:
"""
A string representing the name of an object
used to create the new object by copying the `copy_from` object. The value is in form
{container}/{object}. You must UTF-8-encode and then URL-encode the names of the
container and object before you include them in the header. Conflicts with `source` and
`content`.
"""
return pulumi.get(self, "copy_from")
@copy_from.setter
def copy_from(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "copy_from", value)
@property
@pulumi.getter
def date(self) -> Optional[pulumi.Input[str]]:
"""
The date and time the system responded to the request, using the preferred
format of RFC 7231 as shown in this example Thu, 16 Jun 2016 15:10:38 GMT. The
time is always in UTC.
"""
return pulumi.get(self, "date")
@date.setter
def date(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "date", value)
@property
@pulumi.getter(name="deleteAfter")
def delete_after(self) -> Optional[pulumi.Input[int]]:
"""
An integer representing the number of seconds after which the
system removes the object. Internally, the Object Storage system stores this value in
the X-Delete-At metadata item.
"""
return | |
from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
IntegerType,
DataType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ImagingStudySchema:
"""
Representation of the content produced in a DICOM imaging study. A study
comprises a set of series, each of which includes a set of Service-Object Pair
Instances (SOP Instances - images or other data) acquired or produced in a
common context. A series is of only one modality (e.g. X-ray, CT, MR,
ultrasound), but a study may have multiple series of different modalities.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
Representation of the content produced in a DICOM imaging study. A study
comprises a set of series, each of which includes a set of Service-Object Pair
Instances (SOP Instances - images or other data) acquired or produced in a
common context. A series is of only one modality (e.g. X-ray, CT, MR,
ultrasound), but a study may have multiple series of different modalities.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a ImagingStudy resource
uid: Formal identifier for the study.
accession: Accession Number is an identifier related to some aspect of imaging workflow
and data management. Usage may vary across different institutions. See for
instance [IHE Radiology Technical Framework Volume 1 Appendix A](http://www.ih
e.net/uploadedFiles/Documents/Radiology/IHE_RAD_TF_Rev13.0_Vol1_FT_2014-07-30.
pdf).
identifier: Other identifiers for the study.
availability: Availability of study (online, offline, or nearline).
modalityList: A list of all the Series.ImageModality values that are actual acquisition
modalities, i.e. those in the DICOM Context Group 29 (value set OID
1.2.840.10008.6.1.19).
patient: The patient imaged in the study.
context: The encounter or episode at which the request is initiated.
started: Date and time the study started.
basedOn: A list of the diagnostic requests that resulted in this imaging study being
performed.
referrer: The requesting/referring physician.
interpreter: Who read the study and interpreted the images or other content.
endpoint: The network service providing access (e.g., query, view, or retrieval) for the
study. See implementation notes for information about using DICOM endpoints. A
study-level endpoint applies to each series in the study, unless overridden by
a series-level endpoint with the same Endpoint.type.
numberOfSeries: Number of Series in the Study. This value given may be larger than the number
of series elements this Resource contains due to resource availability,
security, or other factors. This element should be present if any series
elements are present.
numberOfInstances: Number of SOP Instances in Study. This value given may be larger than the
number of instance elements this resource contains due to resource
availability, security, or other factors. This element should be present if
any instance elements are present.
procedureReference: A reference to the performed Procedure.
procedureCode: The code for the performed procedure type.
reason: Description of clinical condition indicating why the ImagingStudy was
requested.
description: Institution-generated description or classification of the Study performed.
series: Each study has one or more series of images or other content.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.coding import CodingSchema
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.imagingstudy_series import (
ImagingStudy_SeriesSchema,
)
if (
max_recursion_limit
and nesting_list.count("ImagingStudy") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["ImagingStudy"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a ImagingStudy resource
StructField("resourceType", StringType(), True),
# Formal identifier for the study.
StructField("uid", StringType(), True),
# Accession Number is an identifier related to some aspect of imaging workflow
# and data management. Usage may vary across different institutions. See for
# instance [IHE Radiology Technical Framework Volume 1 Appendix A](http://www.ih
# e.net/uploadedFiles/Documents/Radiology/IHE_RAD_TF_Rev13.0_Vol1_FT_2014-07-30.
# | |
        Reads the status reports from the Bits# for the specified,
        usually short, time period t. The script will wait for this time
        to elapse, so it is not ideal for time-critical applications.
If t is less than 0.01 polling will continue until at least 1
data entry has been recorded.
If you don't want to wait while this does its job
use startStatusLog and stopStatusLog instead.
Fills the statusValues list with all the status values
read during the time period.
Fills the statusEvents list with just those status values
that are likely to be meaningful events.
the members statusValues and statusEvents will end up containing
dict like objects of the following style:
sample, time, trigIn, DIN[10], DWORD, IR[6], ADC[6]
They can be accessed as statusValues[i]['sample'] or
        statusValues[i].sample, statusValues[x].ADC[j].
Example::
bits.pollStatus()
print(bits.statusValues[0].IR[0])
        will display the value of the first IR input (IR[0]) in the first sample recorded.
Note: Starts and stops logging for itself.
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also Bits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
self.startStatusLog(t)
self.statusThread.join()
self._statusDisable()
self._getStatusLog()
self._extractStatusEvents()
self.flush()
del self.statusThread
def startStatusLog(self, t=60):
""" Start logging data from the Bits#
Starts data logging in its own thread.
        Will run for t seconds, default 60, or until
stopStatusLog() is called.
Example::
bits.startStatusLog()
            while not event:
#do some processing
continue
bits.stopStatusLog()
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also Bits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
if self.statusBoxEnabled:
warning = ("Cannot use status log when statusBox is on ")
raise AssertionError(warning)
if self.RTBoxEnabled:
warning = ("Cannot use status log when RTBox is on ")
raise AssertionError(warning)
# Try both Py2 and Py3 safe versions
# The two Python versions seem to want to pass args to threads in different ways.
try:
self.statusThread=threading.Thread(target=self._statusLog,args=(t,))
except Exception:
self.statusThread=threading.Thread(target=self._statusLog,args=(t))
self.statusEnd = False
self._statusEnable()
self.statusThread.start()
def stopStatusLog(self):
""" Stop logging data from the Bits#
and extracts the raw status values and significant events and puts them
in statusValues and statusEvents.
statusValues will end up containing
dict like objects of the following style:
sample, time, trigIn, DIN[10], DWORD, IR[6], ADC[6]
They can be accessed as statusValues[i]['sample'] or
statusValues[i].sample, statusValues[x].ADC[j].
StatusEvents will end up containing dict like objects of
the following style:
source, input, direction, time.
The data can be accessed as statusEvents[i]['time'] or statusEvents[i].time
Waits for _statusLog to finish properly
so can introduce a timing delay.
Example::
bits.startStatusLog()
            while not event:
#do some processing
continue
bits.stopStatusLog()
print(bits.statusValues[0].time)
print(bits.statusEvents[0].time)
        Will display the time stamps of the first status value recorded and the first
meaningful event.
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also Bits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
self.statusEnd=True # This semaphore will tell the status logging thread to stop.
self.statusThread.join() # Join the thread and wait for it to finish.
# Get the stausts values and events form the queue.
self._getStatusLog()
self._extractStatusEvents()
del self.statusThread
def getAllStatusEvents(self):
"""Returns the whole status event list
Returns a list of dictionary like objects with the following entries
source, input, direction, time.
source = the general source of the event - e.g.
DIN for Digital input,
IR for CB6 IR response box events
input = the individual input in the source.
direction = 'up' or 'down'
time = time stamp.
        All sources are numbered from zero.
Din 0 ... 9
IR 0 ... 5
ADC 0 ... 5
mode specifies which directions of events are captured.
e.g 'up' will only report up events.
The data can be accessed as value[i]['time'] or value[i].time
Example:
bits.startStatusLog()
            while not event:
#do some processing
continue
bits.stopStatusLog()
            res = bits.getAllStatusEvents()
            print(res[0].time)
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also Bits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
return self.statusEvents
def getStatusEvent(self, N=0):
""" pulls out the Nth event from the status event list
Returns a dictionary like object with the following entries
source, input, direction, time.
source = the general source of the event - e.g.
DIN for Digital input,
            IR for CB6 IR response box.
input = the individual input in the source.
direction = 'up' or 'down'
time = time stamp.
All sourses are numbered from zero.
Din 0 ... 9
IR 0 ... 5
ADC 0 ... 5
mode specifies which directions of events are captured,
e.g 'up' will only report up events.
The data can be accessed as value['time'] or value.time
Example:
bits.startStatusLog()
            while not event:
#do some processing
continue
bits.stopStatusLog()
            res = bits.getStatusEvent(20)
            print(res.time)
Note that the firmware in Bits# units varies over time and some
features of this class may not work for all firmware versions.
Also Bits# units can be configured in various ways via their
config.xml file so this class makes certain assumptions about the
configuration. In particular it is assumed that all digital inputs,
triggers and analog inputs are reported as part of status
updates. If some of these report are disabled in your config.xml file
then 'status' and 'event' commands in this class may not work.
"""
if N < self.status_nEvents:
op = self.statusEvents[N]
return op
else:
return
def getAllStatusValues(self):
"""Returns the whole status values list.
Returns a list of dict like objects with the following entries
sample, time, trigIn, DIN[10], DWORD, IR[6], ADC[6]
sample is the sample ID number.
time is the time stamp.
trigIn is the value of the trigger input.
DIN is a list of 10 digital input values.
DWORD represents the digital inputs as a single decimal value.
IR is a list of 10 infra-red (IR) input values.
ADC is a list of 6 analog input values.
These can be accessed as value[i]['sample']
or value[i].sample, values[i].ADC[j].
        All sources are numbered
import numpy as np
from math import sqrt
from scipy.optimize import minimize, Bounds
from .functions import gp, link_gp
class kernel:
"""
Class that defines the GPs in the DGP hierarchy.
Args:
length (ndarray): a numpy 1d-array, whose length equals to:
1. one if the lengthscales in the kernel function are assumed same across input dimensions;
2. the total number of input dimensions, which is the sum of the number of feeding GPs
in the last layer (defined by the argument 'input_dim') and the number of connected global
input dimensions (defined by the argument 'connect'), if the lengthscales in the kernel function
are assumed different across input dimensions.
scale (float, optional): the variance of a GP. Defaults to 1..
nugget (float, optional): the nugget term of a GP. Defaults to 1e-8.
name (str, optional): kernel function to be used. Either 'sexp' for squared exponential kernel or
'matern2.5' for Matern2.5 kernel. Defaults to 'sexp'.
prior_name (str, optional): prior class. Either gamma ('ga') or inverse gamma ('inv_ga') distribution for
the lengthscales and nugget term. Set None to disable the prior. Defaults to 'ga'.
prior_coef (ndarray, optional): a numpy 1d-array that contains two values specifying the shape and rate
            parameters of the gamma prior, or the shape and scale parameters of the inverse gamma prior. Defaults to np.array([1.6,0.3]).
nugget_est (int, optional): set to 1 to estimate nugget term or to 0 to fix the nugget term as specified
by the argument 'nugget'. If set to 1, the value set to the argument 'nugget' is used as the initial
value. Defaults to 0.
scale_est (int, optional): set to 1 to estimate the variance or to 0 to fix the variance as specified
by the argument 'scale'. Defaults to 0.
input_dim (ndarray, optional): a numpy 1d-array that contains the indices of GPs in the last layer
whose outputs (or the indices of dimensions in the global input if the GP is in the first layer)
feed into the GP. When set to None, all outputs from GPs of last layer (or all global input
dimensions) feed into the GP. Defaults to None.
connect (ndarray, optional): a numpy 1d-array that contains the indices of dimensions in the global
input connecting to the GP as additional input dimensions to the input obtained from the output of
GPs in the last layer (as determined by the argument 'input_dim'). When set to None, no global input
connection is implemented. Defaults to None.
Attributes:
type (str): identifies that the kernel is a GP.
g (function): a function giving the log probability density function of gamma or inverse gamma distribution
ignoring the constant part.
gfod (function): a function giving the first order derivative of g with respect to the log-transformed
lengthscales and nugget.
para_path (ndarray): a numpy 2d-array that contains the trace of model parameters. Each row is a
parameter estimate produced by one SEM iteration. The model parameters in each row are ordered as
            follows: np.array([scale estimate, lengthscale estimate (whose length>=1), nugget estimate]).
last_layer_global_input (ndarray): a numpy 2d-array that contains the connect global input dimensions for
GPs in the last layer (without missingness masks). The value of this attribute is assigned during the
initialisation of 'dgp' class.
global_input (ndarray): a numpy 2d-array that contains the connect global input dimensions determined
by the argument 'connect'. The value of the attribute is assigned during the initialisation of
'dgp' class. If 'connect' is set to None, this attribute is also None. If it is for the GP in the last
layer, it is a masked (according to missingness attribute) version of last_layer_global_input.
last_layer_input (ndarray): a numpy 2d-array that contains the input training data (without missingness
masks) of the GPs in the final layer. The value of this attribute is assigned during the initialisation
of 'dgp' class.
input (ndarray): a numpy 2d-array (each row as a data point and each column as a data dimension) that
contains the input training data (according to the argument 'input_dim') to the GP. The value of
this attribute is assigned during the initialisation of 'dgp' class. If it is for the GP in the last
layer, it is a masked (according to missingness attribute) version of last_layer_input.
output (ndarray): a numpy 2d-array with only one column that contains the output training data to the GP.
The value of this attribute is assigned during the initialisation of 'dgp' class.
missingness (ndarray): a numpy 1d-array of bool that indicates the missingness in the output attributes.
If a cell is True, then the corresponding cell in the output attribute needs to be imputed. The value
of this attribute is assigned during the initialisation of 'dgp' class.
rep (ndarray): a numpy 1d-array used to re-construct repetitions in the data according to the repetitions
in the global input, i.e., rep is assigned during the initialisation of 'dgp' class if one input position
has multiple outputs. Otherwise, it is None. Defaults to None.
Remarks:
For linked GP inference, when creating kernel classes for GP nodes in each layer,
1. The 'connect' argument of the kernel class is set at its default None and not used because one needs
to explicitly specify the external inputs to each GP using the Z argument of lgp class;
2. The 'global_input' attribute in the kernel class no longer contains dimensions of global input to the
GPs in the first layer, as in DGP inference. Instead it contains external inputs
provided in the Z argument;
3. The 'missingness' attribute in the kernel class is not set and used because in linked GP inference all
internal I/O are observable;
4. The 'input_dim' argument in the kernel class needs to be specified explicitly by the user to let the
inference know which GPs in the last layer are feeding GPs. We do not implement the default setting, like
        in the DGP case, that a GP is connected to all GPs in the last layer. Thus, one has to supply the 'input_dim'
        argument with the full set of GP node indices in the last layer if all GPs in the last layer feed the GP that the kernel
        class represents. For example, if one is creating a GP that has its local input produced by all 4 GPs in the
last layer, then one needs to assign np.arange(4) to the 'input_dim' argument explicitly.
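    A minimal construction sketch (hypothetical values, shown for illustration only):
        import numpy as np
        k = kernel(length=np.array([0.5]), scale=1., name='sexp', nugget_est=1)
        # Once the 'dgp' class has assigned k.input (and k.output), k.k_matrix()
        # returns the n x n correlation matrix of the training inputs.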
"""
def __init__(self, length, scale=1., nugget=1e-8, name='sexp', prior_name='ga', prior_coef=np.array([1.6,0.3]), nugget_est=0, scale_est=0, input_dim=None, connect=None):
self.type='gp'
self.length=length
self.scale=np.atleast_1d(scale)
self.nugget=np.atleast_1d(nugget)
self.name=name
self.prior_name=prior_name
self.prior_coef=prior_coef
if self.prior_name=='ga':
self.g=lambda x: (self.prior_coef[0]-1)*np.log(x)-self.prior_coef[1]*x
self.gfod=lambda x: (self.prior_coef[0]-1)-self.prior_coef[1]*x
elif self.prior_name=='inv_ga':
self.g=lambda x: -(self.prior_coef[0]+1)*np.log(x)-self.prior_coef[1]/x
self.gfod=lambda x: -(self.prior_coef[0]+1)+self.prior_coef[1]/x
self.nugget_est=nugget_est
self.scale_est=scale_est
self.input_dim=input_dim
self.connect=connect
self.para_path=np.concatenate((self.scale,self.length,self.nugget))
self.last_layer_global_input=None
self.global_input=None
self.last_layer_input=None
self.input=None
self.output=None
self.missingness=None
self.rep=None
def log_t(self):
"""Log transform the model paramters (lengthscales and nugget).
Returns:
            ndarray: a numpy 1d-array of log-transformed model parameters
"""
if self.nugget_est==1:
log_theta=np.log(np.concatenate((self.length,self.nugget)))
else:
log_theta=np.log(self.length)
return log_theta
def update(self,log_theta):
"""Update the model paramters (scale, lengthscales and nugget).
Args:
log_theta (ndarray): optimised numpy 1d-array of log-transformed lengthscales and nugget.
"""
theta=np.exp(log_theta)
if self.nugget_est==1:
self.length=theta[0:-1]
self.nugget=theta[[-1]]
else:
self.length=theta
if self.scale_est==1:
K=self.k_matrix()
KinvY=np.linalg.solve(K,self.output)
YKinvY=(self.output).T@KinvY
new_scale=YKinvY/len(self.output)
self.scale=new_scale.flatten()
def k_matrix(self):
"""Compute the correlation matrix.
Returns:
ndarray: a numpy 2d-array as the correlation matrix.
"""
n=len(self.input)
if self.global_input is not None:
X=np.concatenate((self.input, self.global_input),1)
else:
X=self.input
X_l=X/self.length
if self.name=='sexp':
L=np.sum(X_l**2,1).reshape([-1,1])
dis2=np.abs(L-2*X_l@X_l.T+L.T)
K=np.exp(-dis2)
elif self.name=='matern2.5':
X_l=np.expand_dims(X_l.T,axis=2)
dis=np.abs(X_l-X_l.transpose([0,2,1]))
K_1=np.prod(1+sqrt(5)*dis+5/3*dis**2,0)
K_2=np.exp(-sqrt(5)*np.sum(dis,0))
K=K_1*K_2
return K+self.nugget*np.eye(n)
def k_fod(self):
"""Compute first order derivatives of the correlation matrix wrt log-transformed lengthscales and nugget.
Returns:
ndarray: a numpy 3d-array that contains the first order derivatives of the correlation matrix
wrt log-transformed lengthscales and nugget. The length of the array equals to the total number
of model parameters (i.e., the total number of lengthscales and nugget).
"""
n=len(self.input)
if self.global_input is not None:
X=np.concatenate((self.input, self.global_input),1)
| |
from unittest.mock import patch, call, ANY, Mock
from mpf.core.rgb_color import RGBColor
from mpf.devices.segment_display.text_stack_entry import TextStackEntry
from mpf.devices.segment_display.transitions import NoTransition, PushTransition, CoverTransition, UncoverTransition, \
WipeTransition, TransitionRunner, SplitTransition
from mpf.devices.segment_display.segment_display_text import SegmentDisplayText
from mpf.platforms.interfaces.segment_display_platform_interface import FlashingType, \
SegmentDisplaySoftwareFlashPlatformInterface
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase
from mpf.tests.MpfTestCase import test_config
class TestSegmentDisplay(MpfFakeGameTestCase):
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/segment_display/'
@test_config("game.yaml")
def test_game(self):
"""Test segment displays in a game for the documentation."""
display1 = self.machine.segment_displays["display1"]
display2 = self.machine.segment_displays["display2"]
display3 = self.machine.segment_displays["display3"]
display4 = self.machine.segment_displays["display4"]
display5 = self.machine.segment_displays["display5"]
self.assertEqual("", display1.hw_display.text)
self.assertEqual("", display2.hw_display.text)
self.assertEqual("", display3.hw_display.text)
self.assertEqual("", display4.hw_display.text)
self.assertEqual("", display5.hw_display.text)
self.start_game()
self.assertEqual("0", display1.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display1.hw_display.flashing)
self.assertEqual("", display2.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("1", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
self.add_player()
self.assertEqual("0", display1.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display1.hw_display.flashing)
self.assertEqual("0", display2.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("1", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
self.machine.game.player.score += 100
self.advance_time_and_run()
self.assertEqual("100", display1.hw_display.text)
self.drain_all_balls()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display1.hw_display.flashing)
self.assertEqual("0", display2.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("1", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
self.machine.game.player.score += 23
self.advance_time_and_run()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual("23", display2.hw_display.text)
self.drain_all_balls()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display1.hw_display.flashing)
self.assertEqual("23", display2.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("2", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
self.drain_all_balls()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display1.hw_display.flashing)
self.assertEqual("23", display2.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("2", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
self.drain_all_balls()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display1.hw_display.flashing)
self.assertEqual("23", display2.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("3", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
self.drain_all_balls()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display1.hw_display.flashing)
self.assertEqual("23", display2.hw_display.text)
self.assertEqual(FlashingType.FLASH_ALL, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("3", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
# game ended
self.drain_all_balls()
self.assertGameIsNotRunning()
self.assertEqual("100", display1.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display1.hw_display.flashing)
self.assertEqual("23", display2.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display2.hw_display.flashing)
self.assertEqual("", display3.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display3.hw_display.flashing)
self.assertEqual("", display4.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display4.hw_display.flashing)
self.assertEqual("", display5.hw_display.text)
self.assertEqual(FlashingType.NO_FLASH, display5.hw_display.flashing)
def test_player(self):
display1 = self.machine.segment_displays["display1"]
display2 = self.machine.segment_displays["display2"]
self.post_event("test_event1")
self.advance_time_and_run()
self.assertEqual("HELLO1", display1.hw_display.text)
self.assertEqual("HELLO2", display2.hw_display.text)
self.post_event("test_event2")
self.advance_time_and_run()
self.assertEqual("", display1.hw_display.text)
self.assertEqual("HELLO2", display2.hw_display.text)
self.post_event("test_flashing")
self.assertEqual(FlashingType.FLASH_ALL, display1.hw_display.flashing)
self.post_event("test_no_flashing")
self.assertEqual(FlashingType.NO_FLASH, display1.hw_display.flashing)
self.post_event("test_event3")
self.advance_time_and_run()
self.assertEqual("", display1.hw_display.text)
self.assertEqual("", display2.hw_display.text)
self.post_event("test_score")
self.advance_time_and_run()
self.assertEqual("1: 0", display1.hw_display.text)
self.assertEqual("2: 0", display2.hw_display.text)
self.machine.variables.set_machine_var("test", 42)
self.advance_time_and_run()
self.assertEqual("1: 0", display1.hw_display.text)
self.assertEqual("2: 42", display2.hw_display.text)
self.start_game()
self.machine.game.player.score += 100
self.advance_time_and_run()
self.assertEqual("1: 100", display1.hw_display.text)
self.assertEqual("2: 42", display2.hw_display.text)
self.machine.game.player.score += 23
self.machine.variables.set_machine_var("test", 1337)
self.advance_time_and_run()
self.assertEqual("1: 123", display1.hw_display.text)
self.assertEqual("2: 1337", display2.hw_display.text)
self.post_event("test_flash")
self.advance_time_and_run(.1)
self.assertEqual("TEST", display1.hw_display.text)
self.assertEqual("2: 1337", display2.hw_display.text)
self.advance_time_and_run(2)
self.assertEqual("1: 123", display1.hw_display.text)
self.assertEqual("2: 1337", display2.hw_display.text)
self.machine.modes["mode1"].start()
self.advance_time_and_run(.1)
self.assertEqual("MODE1", display1.hw_display.text)
self.assertEqual("MODE1", display2.hw_display.text)
self.machine.modes["mode1"].stop()
self.advance_time_and_run(7)
self.assertEqual("1: 123", display1.hw_display.text)
self.assertEqual("2: 1337", display2.hw_display.text)
self.machine.modes["mode1"].start()
self.advance_time_and_run(5)
self.assertEqual("MODE1", display1.hw_display.text)
self.assertEqual("MODE1", display2.hw_display.text)
self.advance_time_and_run(5)
self.assertEqual("MODE1", display1.hw_display.text)
self.assertEqual("2: 1337", display2.hw_display.text)
def test_scoring(self):
display1 = self.machine.segment_displays["display1"]
display2 = self.machine.segment_displays["display2"]
# default scoring
self.post_event("test_score_two_player")
# one player game
self.start_game()
# first display shows score. second empty
self.assertEqual("0", display1.hw_display.text)
self.assertEqual("0", display2.hw_display.text)
# player scores
self.machine.game.player.score += 42
self.advance_time_and_run(.01)
self.assertEqual("42", display1.hw_display.text)
self.assertEqual("0", display2.hw_display.text)
# add player
self.add_player()
self.advance_time_and_run(.01)
self.assertEqual("42", display1.hw_display.text)
self.assertEqual("0", display2.hw_display.text)
@patch("mpf.platforms.interfaces.segment_display_platform_interface.SegmentDisplaySoftwareFlashPlatformInterface.__abstractmethods__", set())
@patch("mpf.platforms.interfaces.segment_display_platform_interface.SegmentDisplaySoftwareFlashPlatformInterface._set_text")
def test_software_flash_platform_interface(self, mock_set_text):
display = SegmentDisplaySoftwareFlashPlatformInterface("1")
display.set_text("12345 ABCDE", FlashingType.NO_FLASH)
display.set_software_flash(False)
self.assertTrue(mock_set_text.called)
mock_set_text.assert_has_calls([call("12345 ABCDE")])
display.set_software_flash(True)
mock_set_text.reset_mock()
display.set_text("12345 ABCDE", FlashingType.FLASH_ALL)
display.set_software_flash(False)
self.assertTrue(mock_set_text.called)
mock_set_text.assert_has_calls([call("12345 ABCDE"), call("")])
display.set_software_flash(True)
mock_set_text.reset_mock()
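        # FLASH_MATCH: as the expected calls below show, the off phase keeps the text
        # but blanks the last two characters (the match digits).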
display.set_text("12345 ABCDE", FlashingType.FLASH_MATCH)
display.set_software_flash(False)
self.assertTrue(mock_set_text.called)
mock_set_text.assert_has_calls([call("12345 ABCDE"), call("12345 ABC ")])
display.set_software_flash(True)
mock_set_text.reset_mock()
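        # FLASH_MASK: positions marked 'F' in the mask flash, so the off phase blanks
        # the first five characters and keeps the rest.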
display.set_text("12345 ABCDE", FlashingType.FLASH_MASK, "FFFFF______")
display.set_software_flash(False)
self.assertTrue(mock_set_text.called)
mock_set_text.assert_has_calls([call("12345 ABCDE"), call(" ABCDE")])
display.set_software_flash(True)
mock_set_text.reset_mock()
def test_segment_display_text(self):
"""Test the SegmentDisplayText class."""
# text equal to display length
test_text = SegmentDisplayText("test", 4, False, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(4, len(test_text))
self.assertEqual("test", SegmentDisplayText.convert_to_str(test_text))
# text longer than display
test_text = SegmentDisplayText("testing", 4, False, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(4, len(test_text))
self.assertEqual("ting", SegmentDisplayText.convert_to_str(test_text))
# text shorter than display
test_text = SegmentDisplayText("test", 7, False, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(7, len(test_text))
self.assertEqual(" test", SegmentDisplayText.convert_to_str(test_text))
# collapse commas
test_text = SegmentDisplayText("25,000", 7, False, True)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(7, len(test_text))
self.assertTrue(test_text[3].comma)
self.assertEqual(ord("5"), test_text[3].char_code)
self.assertFalse(test_text[4].comma)
self.assertEqual(ord("0"), test_text[4].char_code)
self.assertEqual(" 25,000", SegmentDisplayText.convert_to_str(test_text))
# do not collapse commas
test_text = SegmentDisplayText("25,000", 7, False, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(7, len(test_text))
self.assertFalse(test_text[2].comma)
self.assertEqual(ord("5"), test_text[2].char_code)
self.assertFalse(test_text[3].comma)
self.assertEqual(ord(","), test_text[3].char_code)
self.assertEqual(" 25,000", SegmentDisplayText.convert_to_str(test_text))
# collapse dots
test_text = SegmentDisplayText("25.000", 7, True, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(7, len(test_text))
self.assertTrue(test_text[3].dot)
self.assertEqual(ord("5"), test_text[3].char_code)
self.assertFalse(test_text[4].dot)
self.assertEqual(ord("0"), test_text[4].char_code)
self.assertEqual(" 25.000", SegmentDisplayText.convert_to_str(test_text))
# do not collapse dots
test_text = SegmentDisplayText("25.000", 7, False, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(7, len(test_text))
self.assertFalse(test_text[2].dot)
self.assertEqual(ord("5"), test_text[2].char_code)
self.assertFalse(test_text[3].dot)
self.assertEqual(ord("."), test_text[3].char_code)
self.assertEqual(" 25.000", SegmentDisplayText.convert_to_str(test_text))
# no colors
test_text = SegmentDisplayText("COLOR", 5, False, False)
self.assertTrue(isinstance(test_text, list))
self.assertEqual(5, len(test_text))
colors = SegmentDisplayText.get_colors(test_text)
self.assertIsNone(colors)
# single color
test_text = SegmentDisplayText("COLOR", 5, False, False, [RGBColor("ffffff")])
self.assertTrue(isinstance(test_text, list))
self.assertEqual(5, len(test_text))
colors = SegmentDisplayText.get_colors(test_text)
self.assertEqual(5, len(colors))
self.assertEqual(5, colors.count(RGBColor("ffffff")))
# multiple colors
test_text = SegmentDisplayText("COLOR", 5, False, False,
[RGBColor("white"), RGBColor("red"), RGBColor("green"), RGBColor("blue"),
RGBColor("cyan")])
self.assertTrue(isinstance(test_text, list))
self.assertEqual(5, len(test_text))
colors = SegmentDisplayText.get_colors(test_text)
self.assertEqual(5, len(colors))
self.assertEqual([RGBColor("white"), RGBColor("red"), RGBColor("green"),
RGBColor("blue"), RGBColor("cyan")], colors)
# multiple colors (fewer colors than letters)
test_text = SegmentDisplayText("COLOR", 5, False, False,
[RGBColor("white"), RGBColor("red")])
self.assertTrue(isinstance(test_text, list))
self.assertEqual(5, len(test_text))
colors = SegmentDisplayText.get_colors(test_text)
self.assertEqual(5, len(colors))
self.assertEqual([RGBColor("white"), RGBColor("red"), RGBColor("red"),
RGBColor("red"), RGBColor("red")], colors)
# multiple colors (fewer colors than letters and fewer letters than characters)
test_text = SegmentDisplayText("COLOR", 8, False, False,
[RGBColor("white"), RGBColor("red")])
self.assertTrue(isinstance(test_text, list))
self.assertEqual(8, len(test_text))
colors = SegmentDisplayText.get_colors(test_text)
self.assertEqual(8, len(colors))
self.assertEqual([RGBColor("white"), RGBColor("white"), RGBColor("white"), RGBColor("white"),
RGBColor("red"), RGBColor("red"), RGBColor("red"), RGBColor("red")], colors)
def test_transitions(self):
"""Test segment display text transitions."""
self._test_no_transition()
self._test_push_transition()
self._test_cover_transition()
self._test_uncover_transition()
self._test_wipe_transition()
self._test_split_transition()
def _test_no_transition(self):
"""Test no transition."""
# no transition (with colors)
transition = NoTransition(5, False, False, {'direction': 'right'})
self.assertEqual(1, transition.get_step_count())
self.assertEqual("ABCDE",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("green"), RGBColor("green")],
SegmentDisplayText.get_colors(transition.get_transition_step(0, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
with self.assertRaises(AssertionError):
transition.get_transition_step(1, "12345", "ABCDE")
def _test_push_transition(self):
"""Test push transition."""
# push right (with colors)
transition = PushTransition(5, False, False, {'direction': 'right'})
self.assertEqual(5, transition.get_step_count())
self.assertEqual("E1234",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("red"), RGBColor("red"),
RGBColor("red"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(0, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("DE123",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("red"),
RGBColor("red"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(1, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("CDE12",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("red"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(2, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("BCDE1",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("green"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(3, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("ABCDE",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("green"), RGBColor("green")],
SegmentDisplayText.get_colors(transition.get_transition_step(4, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
# push left
transition = PushTransition(5, False, False, {'direction': 'left'})
self.assertEqual(5, transition.get_step_count())
self.assertEqual("2345A",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "12345", "ABCDE")))
self.assertEqual("345AB",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "12345", "ABCDE")))
self.assertEqual("45ABC",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "12345", "ABCDE")))
self.assertEqual("5ABCD",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "12345", "ABCDE")))
self.assertEqual("ABCDE",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "12345", "ABCDE")))
# push right (display larger than text)
transition = PushTransition(8, False, False, {'direction': 'right'})
self.assertEqual(8, transition.get_step_count())
self.assertEqual("E 1234",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "12345", "ABCDE")))
self.assertEqual("DE 123",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "12345", "ABCDE")))
self.assertEqual("CDE 12",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "12345", "ABCDE")))
self.assertEqual("BCDE 1",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "12345", "ABCDE")))
self.assertEqual("ABCDE ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "12345", "ABCDE")))
self.assertEqual(" ABCDE ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(5, "12345", "ABCDE")))
self.assertEqual(" ABCDE ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(6, "12345", "ABCDE")))
self.assertEqual(" ABCDE",
SegmentDisplayText.convert_to_str(transition.get_transition_step(7, "12345", "ABCDE")))
# push left (display larger than text)
transition = PushTransition(8, False, False, {'direction': 'left'})
self.assertEqual(8, transition.get_step_count())
self.assertEqual(" 12345 ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "12345", "ABCDE")))
self.assertEqual(" 12345 ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "12345", "ABCDE")))
self.assertEqual("12345 ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "12345", "ABCDE")))
self.assertEqual("2345 A",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "12345", "ABCDE")))
self.assertEqual("345 AB",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "12345", "ABCDE")))
self.assertEqual("45 ABC",
SegmentDisplayText.convert_to_str(transition.get_transition_step(5, "12345", "ABCDE")))
self.assertEqual("5 ABCD",
SegmentDisplayText.convert_to_str(transition.get_transition_step(6, "12345", "ABCDE")))
self.assertEqual(" ABCDE",
SegmentDisplayText.convert_to_str(transition.get_transition_step(7, "12345", "ABCDE")))
# push right (collapse commas)
transition = PushTransition(5, False, True, {'direction': 'right'})
self.assertEqual(5, transition.get_step_count())
self.assertEqual("0 1,00",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "1,000", "25,000")))
self.assertEqual("00 1,0",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "1,000", "25,000")))
self.assertEqual("000 1,",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "1,000", "25,000")))
self.assertEqual("5,000 ",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "1,000", "25,000")))
self.assertEqual("25,000",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "1,000", "25,000")))
# push left (collapse commas)
transition = PushTransition(5, False, True, {'direction': 'left'})
self.assertEqual(5, transition.get_step_count())
self.assertEqual("1,0002",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "1,000", "25,000")))
self.assertEqual("00025,",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "1,000", "25,000")))
self.assertEqual("0025,0",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "1,000", "25,000")))
self.assertEqual("025,00",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "1,000", "25,000")))
self.assertEqual("25,000",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "1,000", "25,000")))
# push right (with text and colors)
transition = PushTransition(5, False, False,
{'direction': 'right', 'text': '-->', 'text_color': [RGBColor("yellow")]})
self.assertEqual(8, transition.get_step_count())
self.assertEqual(">1234",
SegmentDisplayText.convert_to_str(transition.get_transition_step(0, "12345", "ABCDE")))
self.assertEqual([RGBColor("yellow"), RGBColor("red"), RGBColor("red"),
RGBColor("red"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(0, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("->123",
SegmentDisplayText.convert_to_str(transition.get_transition_step(1, "12345", "ABCDE")))
self.assertEqual([RGBColor("yellow"), RGBColor("yellow"), RGBColor("red"),
RGBColor("red"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(1, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("-->12",
SegmentDisplayText.convert_to_str(transition.get_transition_step(2, "12345", "ABCDE")))
self.assertEqual([RGBColor("yellow"), RGBColor("yellow"), RGBColor("yellow"),
RGBColor("red"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(2, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("E-->1",
SegmentDisplayText.convert_to_str(transition.get_transition_step(3, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("yellow"), RGBColor("yellow"),
RGBColor("yellow"), RGBColor("red")],
SegmentDisplayText.get_colors(transition.get_transition_step(3, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("DE-->",
SegmentDisplayText.convert_to_str(transition.get_transition_step(4, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("yellow"),
RGBColor("yellow"), RGBColor("yellow")],
SegmentDisplayText.get_colors(transition.get_transition_step(4, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("CDE--",
SegmentDisplayText.convert_to_str(transition.get_transition_step(5, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("yellow"), RGBColor("yellow")],
SegmentDisplayText.get_colors(transition.get_transition_step(5, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("BCDE-",
SegmentDisplayText.convert_to_str(transition.get_transition_step(6, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("green"), RGBColor("yellow")],
SegmentDisplayText.get_colors(transition.get_transition_step(6, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
self.assertEqual("ABCDE",
SegmentDisplayText.convert_to_str(transition.get_transition_step(7, "12345", "ABCDE")))
self.assertEqual([RGBColor("green"), RGBColor("green"), RGBColor("green"),
RGBColor("green"), RGBColor("green")],
SegmentDisplayText.get_colors(transition.get_transition_step(7, "12345", "ABCDE",
[RGBColor("red")],
[RGBColor("green")])))
# push right (with text that has color = None and colors)
transition = PushTransition(5, False, False,
                                    {'direction': 'right', 'text': '-->',
        # From test_templates/resource.py ResourceLookupSession.get_resources_template
from dlkit.abstract_osid.repository.objects import CompositionList
if self.svc_mgr.supports_composition_query():
objects = self.catalog.get_compositions()
assert isinstance(objects, CompositionList)
self.catalog.use_federated_repository_view()
objects = self.catalog.get_compositions()
assert isinstance(objects, CompositionList)
if not is_never_authz(self.service_config):
assert objects.available() > 0
else:
assert objects.available() == 0
else:
if not is_never_authz(self.service_config):
objects = self.catalog.get_compositions()
assert isinstance(objects, CompositionList)
self.catalog.use_federated_repository_view()
objects = self.catalog.get_compositions()
assert objects.available() > 0
assert isinstance(objects, CompositionList)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_compositions()
def test_get_composition_with_alias(self):
if not is_never_authz(self.service_config):
# Because you can't create the alias with NEVER_AUTHZ
self.catalog.alias_composition(self.composition_ids[0], ALIAS_ID)
obj = self.catalog.get_composition(ALIAS_ID)
assert obj.get_id() == self.composition_ids[0]
class FakeQuery:
_cat_id_args_list = []
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def composition_query_session_class_fixture(request):
# From test_templates/resource.py::ResourceQuerySession::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'REPOSITORY',
proxy=PROXY,
implementation=request.cls.service_config)
@pytest.fixture(scope="function")
def composition_query_session_test_fixture(request):
# From test_templates/resource.py::ResourceQuerySession::init_template
request.cls.composition_list = list()
request.cls.composition_ids = list()
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_repository_form_for_create([])
create_form.display_name = 'Test Repository'
create_form.description = 'Test Repository for CompositionQuerySession tests'
request.cls.catalog = request.cls.svc_mgr.create_repository(create_form)
for color in ['Orange', 'Blue', 'Green', 'orange']:
create_form = request.cls.catalog.get_composition_form_for_create([])
create_form.display_name = 'Test Composition ' + color
create_form.description = (
'Test Composition for CompositionQuerySession tests, did I mention green')
obj = request.cls.catalog.create_composition(create_form)
request.cls.composition_list.append(obj)
request.cls.composition_ids.append(obj.ident)
else:
request.cls.catalog = request.cls.svc_mgr.get_composition_query_session(proxy=PROXY)
request.cls.session = request.cls.catalog
def test_tear_down():
if not is_never_authz(request.cls.service_config):
for obj in request.cls.catalog.get_compositions():
request.cls.catalog.delete_composition(obj.ident)
request.cls.svc_mgr.delete_repository(request.cls.catalog.ident)
request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("composition_query_session_class_fixture", "composition_query_session_test_fixture")
class TestCompositionQuerySession(object):
"""Tests for CompositionQuerySession"""
def test_get_repository_id(self):
"""Tests get_repository_id"""
# From test_templates/resource.py ResourceLookupSession.get_bin_id_template
if not is_never_authz(self.service_config):
assert self.catalog.get_repository_id() == self.catalog.ident
def test_get_repository(self):
"""Tests get_repository"""
# is this test really needed?
# From test_templates/resource.py::ResourceLookupSession::get_bin_template
if not is_never_authz(self.service_config):
assert isinstance(self.catalog.get_repository(), ABCRepository)
def test_can_search_compositions(self):
"""Tests can_search_compositions"""
# From test_templates/resource.py ResourceQuerySession::can_search_resources_template
assert isinstance(self.session.can_search_compositions(), bool)
def test_use_federated_repository_view(self):
"""Tests use_federated_repository_view"""
# From test_templates/resource.py ResourceLookupSession.use_federated_bin_view_template
self.catalog.use_federated_repository_view()
def test_use_isolated_repository_view(self):
"""Tests use_isolated_repository_view"""
# From test_templates/resource.py ResourceLookupSession.use_isolated_bin_view_template
self.catalog.use_isolated_repository_view()
def test_use_sequestered_composition_view(self):
"""Tests use_sequestered_composition_view"""
# From test_templates/repository.py::CompositionLookupSession::use_sequestered_composition_view
# Ideally also verify the value is set...
self.catalog.use_sequestered_composition_view()
def test_use_unsequestered_composition_view(self):
"""Tests use_unsequestered_composition_view"""
# From test_templates/repository.py::CompositionLookupSession::use_unsequestered_composition_view
# Ideally also verify the value is set...
self.catalog.use_unsequestered_composition_view()
def test_get_composition_query(self):
"""Tests get_composition_query"""
# From test_templates/resource.py ResourceQuerySession::get_resource_query_template
query = self.session.get_composition_query()
assert isinstance(query, ABCQueries.CompositionQuery)
def test_get_compositions_by_query(self):
"""Tests get_compositions_by_query"""
if not is_never_authz(self.service_config):
cfu = self.catalog.get_composition_form_for_update(self.composition_list[3].ident)
cfu.set_sequestered(True)
self.catalog.update_composition(cfu)
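            # 'Test Composition orange' (index 3) is now sequestered, so queries in the
            # default sequestered view are not expected to return it.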
query = self.catalog.get_composition_query()
query.match_display_name('orange')
assert self.catalog.get_compositions_by_query(query).available() == 1
query.clear_display_name_terms()
query.match_display_name('blue', match=False)
assert self.catalog.get_compositions_by_query(query).available() == 2
cfu = self.catalog.get_composition_form_for_update(self.composition_list[3].ident)
cfu.set_sequestered(False)
self.catalog.update_composition(cfu)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_compositions_by_query(FakeQuery())
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def composition_search_session_class_fixture(request):
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'REPOSITORY',
proxy=PROXY,
implementation=request.cls.service_config)
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_repository_form_for_create([])
create_form.display_name = 'Test Repository'
create_form.description = 'Test Repository for CompositionSearchSession tests'
request.cls.catalog = request.cls.svc_mgr.create_repository(create_form)
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.delete_repository(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def composition_search_session_test_fixture(request):
request.cls.session = request.cls.catalog
@pytest.mark.usefixtures("composition_search_session_class_fixture", "composition_search_session_test_fixture")
class TestCompositionSearchSession(object):
"""Tests for CompositionSearchSession"""
def test_get_composition_search(self):
"""Tests get_composition_search"""
# From test_templates/resource.py::ResourceSearchSession::get_resource_search_template
result = self.session.get_composition_search()
assert isinstance(result, ABCSearches.CompositionSearch)
def test_get_composition_search_order(self):
"""Tests get_composition_search_order"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
else:
with pytest.raises(errors.Unimplemented):
self.session.get_composition_search_order()
def test_get_compositions_by_search(self):
"""Tests get_compositions_by_search"""
# From test_templates/resource.py::ResourceSearchSession::get_resources_by_search_template
query = self.catalog.get_composition_query()
search = self.session.get_composition_search()
results = self.session.get_compositions_by_search(query, search)
assert isinstance(results, ABCSearches.CompositionSearchResults)
def test_get_composition_query_from_inspector(self):
"""Tests get_composition_query_from_inspector"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.session.get_composition_query_from_inspector(True)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def composition_admin_session_class_fixture(request):
# From test_templates/resource.py::ResourceAdminSession::init_template
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'REPOSITORY',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.assessment_mgr = Runtime().get_service_manager(
'ASSESSMENT',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.fake_id = Id('resource.Resource%3Afake%40DLKIT.MIT.EDU')
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_repository_form_for_create([])
create_form.display_name = 'Test Repository'
create_form.description = 'Test Repository for CompositionAdminSession tests'
request.cls.catalog = request.cls.svc_mgr.create_repository(create_form)
else:
request.cls.catalog = request.cls.svc_mgr.get_composition_admin_session(proxy=PROXY)
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for obj in request.cls.catalog.get_compositions():
request.cls.catalog.delete_composition(obj.ident)
request.cls.svc_mgr.delete_repository(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def composition_admin_session_test_fixture(request):
# From test_templates/resource.py::ResourceAdminSession::init_template
if not is_never_authz(request.cls.service_config):
request.cls.form = request.cls.catalog.get_composition_form_for_create([])
request.cls.form.display_name = 'new Composition'
request.cls.form.description = 'description of Composition'
request.cls.form.set_genus_type(NEW_TYPE)
request.cls.osid_object = request.cls.catalog.create_composition(request.cls.form)
request.cls.session = request.cls.catalog
def test_tear_down():
# From test_templates/resource.py::ResourceAdminSession::init_template
if not is_never_authz(request.cls.service_config):
request.cls.catalog.delete_composition(request.cls.osid_object.ident)
request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("composition_admin_session_class_fixture", "composition_admin_session_test_fixture")
class TestCompositionAdminSession(object):
"""Tests for CompositionAdminSession"""
def test_get_repository_id(self):
"""Tests get_repository_id"""
# From test_templates/resource.py ResourceLookupSession.get_bin_id_template
if not is_never_authz(self.service_config):
assert self.catalog.get_repository_id() == self.catalog.ident
def test_get_repository(self):
"""Tests get_repository"""
# is this test really needed?
# From test_templates/resource.py::ResourceLookupSession::get_bin_template
if not is_never_authz(self.service_config):
assert isinstance(self.catalog.get_repository(), ABCRepository)
def test_can_create_compositions(self):
"""Tests can_create_compositions"""
# From test_templates/resource.py::ResourceAdminSession::can_create_resources_template
assert isinstance(self.catalog.can_create_compositions(), bool)
def test_can_create_composition_with_record_types(self):
"""Tests can_create_composition_with_record_types"""
# From test_templates/resource.py::ResourceAdminSession::can_create_resource_with_record_types_template
assert isinstance(self.catalog.can_create_composition_with_record_types(DEFAULT_TYPE), bool)
def test_get_composition_form_for_create(self):
"""Tests get_composition_form_for_create"""
# From test_templates/resource.py::ResourceAdminSession::get_resource_form_for_create_template
if not is_never_authz(self.service_config):
form = self.catalog.get_composition_form_for_create([])
assert isinstance(form, OsidForm)
assert not form.is_for_update()
with pytest.raises(errors.InvalidArgument):
self.catalog.get_composition_form_for_create([1])
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_composition_form_for_create([])
def test_create_composition(self):
"""Tests create_composition"""
# From test_templates/resource.py::ResourceAdminSession::create_resource_template
from dlkit.abstract_osid.repository.objects import Composition
if not is_never_authz(self.service_config):
assert isinstance(self.osid_object, Composition)
assert self.osid_object.display_name.text == 'new Composition'
assert self.osid_object.description.text == 'description of Composition'
assert self.osid_object.genus_type == NEW_TYPE
with pytest.raises(errors.IllegalState):
self.catalog.create_composition(self.form)
with pytest.raises(errors.InvalidArgument):
self.catalog.create_composition('I Will Break You!')
update_form = self.catalog.get_composition_form_for_update(self.osid_object.ident)
with pytest.raises(errors.InvalidArgument):
self.catalog.create_composition(update_form)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.create_composition('foo')
def test_can_update_compositions(self):
"""Tests can_update_compositions"""
# From test_templates/resource.py::ResourceAdminSession::can_update_resources_template
assert isinstance(self.catalog.can_update_compositions(), bool)
def test_get_composition_form_for_update(self):
"""Tests get_composition_form_for_update"""
# From test_templates/resource.py::ResourceAdminSession::get_resource_form_for_update_template
if not is_never_authz(self.service_config):
form = self.catalog.get_composition_form_for_update(self.osid_object.ident)
assert isinstance(form, OsidForm)
assert form.is_for_update()
with pytest.raises(errors.InvalidArgument):
self.catalog.get_composition_form_for_update(['This is Doomed!'])
with pytest.raises(errors.InvalidArgument):
self.catalog.get_composition_form_for_update(
Id(authority='Respect my Authoritay!',
namespace='repository.{object_name}',
identifier='1'))
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_composition_form_for_update(self.fake_id)
def test_update_composition(self):
"""Tests update_composition"""
# From test_templates/resource.py::ResourceAdminSession::update_resource_template
if not is_never_authz(self.service_config):
from dlkit.abstract_osid.repository.objects import Composition
form = self.catalog.get_composition_form_for_update(self.osid_object.ident)
form.display_name = 'new name'
form.description = 'new description'
form.set_genus_type(NEW_TYPE_2)
updated_object = self.catalog.update_composition(form)
assert isinstance(updated_object, Composition)
assert updated_object.ident == self.osid_object.ident
assert updated_object.display_name.text == 'new name'
assert updated_object.description.text == 'new description'
assert updated_object.genus_type == NEW_TYPE_2
with pytest.raises(errors.IllegalState):
self.catalog.update_composition(form)
with pytest.raises(errors.InvalidArgument):
self.catalog.update_composition('I Will Break You!')
with pytest.raises(errors.InvalidArgument):
self.catalog.update_composition(self.form)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.update_composition('foo')
def test_can_delete_compositions(self):
"""Tests can_delete_compositions"""
# From test_templates/resource.py::ResourceAdminSession::can_delete_resources_template
assert isinstance(self.catalog.can_delete_compositions(), bool)
def test_delete_composition(self):
"""Tests delete_composition"""
# From test_templates/resource.py::ResourceAdminSession::delete_resource_template
if not is_never_authz(self.service_config):
form = self.catalog.get_composition_form_for_create([])
form.display_name = 'new Composition'
form.description = 'description of Composition'
form.set_genus_type(NEW_TYPE)
osid_object = self.catalog.create_composition(form)
self.catalog.delete_composition(osid_object.ident)
with pytest.raises(errors.NotFound):
self.catalog.get_composition(osid_object.ident)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.delete_composition(self.fake_id)
def test_delete_composition_node(self):
"""Tests delete_composition_node"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.session.delete_composition_node(True)
def test_add_composition_child(self):
"""Tests add_composition_child"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.session.add_composition_child(True, True)
def test_remove_composition_child(self):
"""Tests remove_composition_child"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.session.remove_composition_child(True, True)
def test_can_manage_composition_aliases(self):
"""Tests can_manage_composition_aliases"""
# From test_templates/resource.py::ResourceAdminSession::can_manage_resource_aliases_template
assert isinstance(self.catalog.can_manage_composition_aliases(), bool)
def test_alias_composition(self):
"""Tests alias_composition"""
# From test_templates/resource.py::ResourceAdminSession::alias_resource_template
if not is_never_authz(self.service_config):
alias_id = Id(self.catalog.ident.namespace + '%3Amy-alias%40ODL.MIT.EDU')
self.catalog.alias_composition(self.osid_object.ident, alias_id)
aliased_object = self.catalog.get_composition(alias_id)
assert aliased_object.ident == self.osid_object.ident
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.alias_composition(self.fake_id, self.fake_id)
def test_composition_assignment(self):
if not is_never_authz(self.service_config):
composition_list = list()
composition_ids = list()
for num in [0, 1, 2, 3]:
create_form = self.catalog.get_composition_form_for_create([])
create_form.display_name = 'Test Composition ' + str(num)
create_form.description = 'Test Composition for CompositionLookupSession tests'
obj = self.catalog.create_composition(create_form)
composition_list.append(obj)
composition_ids.append(obj.ident)
update_form = self.catalog.get_composition_form_for_update(composition_ids[0])
update_form.set_children(composition_ids[1:])
self.catalog.update_composition(update_form)
composition = self.catalog.get_composition(composition_ids[0])
assert composition.get_children_ids().available() == 3
assert composition.get_child_ids().available() == 3
assert composition.get_children().available() == 3
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def composition_repository_session_class_fixture(request):
# From test_templates/resource.py::ResourceBinSession::init_template
request.cls.service_config = request.param
request.cls.composition_list = list()
request.cls.composition_ids = list()
request.cls.svc_mgr = Runtime().get_service_manager(
'REPOSITORY',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.fake_id = Id('resource.Resource%3Afake%40DLKIT.MIT.EDU')
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_repository_form_for_create([])
create_form.display_name = 'Test Repository'
create_form.description = 'Test Repository for CompositionRepositorySession tests'
request.cls.catalog = request.cls.svc_mgr.create_repository(create_form)
create_form = request.cls.svc_mgr.get_repository_form_for_create([])
create_form.display_name = 'Test Repository for Assignment'
create_form.description = 'Test Repository for CompositionRepositorySession tests assignment'
request.cls.assigned_catalog = request.cls.svc_mgr.create_repository(create_form)
for num in [0, 1, 2]:
create_form = request.cls.catalog.get_composition_form_for_create([])
create_form.display_name = 'Test Composition ' + str(num)
create_form.description = 'Test Composition for CompositionRepositorySession tests'
obj = request.cls.catalog.create_composition(create_form)
request.cls.composition_list.append(obj)
request.cls.composition_ids.append(obj.ident)
request.cls.svc_mgr.assign_composition_to_repository(
request.cls.composition_ids[1], request.cls.assigned_catalog.ident)
request.cls.svc_mgr.assign_composition_to_repository(
request.cls.composition_ids[2], request.cls.assigned_catalog.ident)
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.unassign_composition_from_repository(
request.cls.composition_ids[1], request.cls.assigned_catalog.ident)
request.cls.svc_mgr.unassign_composition_from_repository(
request.cls.composition_ids[2], request.cls.assigned_catalog.ident)
for obj in request.cls.catalog.get_compositions():
request.cls.catalog.delete_composition(obj.ident)
request.cls.svc_mgr.delete_repository(request.cls.assigned_catalog.ident)
request.cls.svc_mgr.delete_repository(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def composition_repository_session_test_fixture(request):
# From test_templates/resource.py::ResourceBinSession::init_template
request.cls.session = request.cls.svc_mgr
@pytest.mark.usefixtures("composition_repository_session_class_fixture", "composition_repository_session_test_fixture")
class TestCompositionRepositorySession(object):
"""Tests for CompositionRepositorySession"""
def test_use_comparative_composition_repository_view(self):
"""Tests use_comparative_composition_repository_view"""
# From test_templates/resource.py::BinLookupSession::use_comparative_bin_view_template
self.svc_mgr.use_comparative_composition_repository_view()
def test_use_plenary_composition_repository_view(self):
"""Tests use_plenary_composition_repository_view"""
# From test_templates/resource.py::BinLookupSession::use_plenary_bin_view_template
self.svc_mgr.use_plenary_composition_repository_view()
def test_can_lookup_composition_repository_mappings(self):
"""Tests can_lookup_composition_repository_mappings"""
# From test_templates/resource.py::ResourceBinSession::can_lookup_resource_bin_mappings
result = self.session.can_lookup_composition_repository_mappings()
assert isinstance(result, bool)
def test_get_composition_ids_by_repository(self):
"""Tests get_composition_ids_by_repository"""
# From test_templates/resource.py::ResourceBinSession::get_resource_ids_by_bin_template
if not is_never_authz(self.service_config):
objects = self.svc_mgr.get_composition_ids_by_repository(self.assigned_catalog.ident)
assert objects.available() == 2
else:
with pytest.raises(errors.PermissionDenied):
self.svc_mgr.get_composition_ids_by_repository(self.fake_id)
def test_get_compositions_by_repository(self):
"""Tests get_compositions_by_repository"""
# From test_templates/resource.py::ResourceBinSession::get_resources_by_bin_template
if not is_never_authz(self.service_config):
results = self.session.get_compositions_by_repository(self.assigned_catalog.ident)
assert isinstance(results, ABCObjects.CompositionList)
assert results.available() == 2
else:
with pytest.raises(errors.PermissionDenied):
self.session.get_compositions_by_repository(self.fake_id)
def test_get_composition_ids_by_repositories(self):
"""Tests get_composition_ids_by_repositories"""
# From test_templates/resource.py::ResourceBinSession::get_resource_ids_by_bins_template
if not is_never_authz(self.service_config):
catalog_ids = [self.catalog.ident, self.assigned_catalog.ident]
object_ids = self.session.get_composition_ids_by_repositories(catalog_ids)
assert isinstance(object_ids, IdList)
Csdm = unop_dict(A, lambda aij: aij*b)
return A.new(Csdm, A.shape, A.domain)
def add(A, B):
"""
Adds two :py:class:`~.SDM` matrices
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> B = SDM({0:{0: ZZ(3)}, 1:{1:ZZ(4)}}, (2, 2), ZZ)
>>> A.add(B)
{0: {0: 3, 1: 2}, 1: {0: 1, 1: 4}}
"""
Csdm = binop_dict(A, B, add, pos, pos)
return A.new(Csdm, A.shape, A.domain)
def sub(A, B):
"""
Subtracts two :py:class:`~.SDM` matrices
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> B = SDM({0:{0: ZZ(3)}, 1:{1:ZZ(4)}}, (2, 2), ZZ)
>>> A.sub(B)
{0: {0: -3, 1: 2}, 1: {0: 1, 1: -4}}
"""
Csdm = binop_dict(A, B, sub, pos, neg)
return A.new(Csdm, A.shape, A.domain)
def neg(A):
"""
Returns the negative of a :py:class:`~.SDM` matrix
Examples
========
>>> from sympy import ZZ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> A.neg()
{0: {1: -2}, 1: {0: -1}}
"""
Csdm = unop_dict(A, neg)
return A.new(Csdm, A.shape, A.domain)
def convert_to(A, K):
"""
Converts the :py:class:`~.Domain` of a :py:class:`~.SDM` matrix to K
Examples
========
>>> from sympy import ZZ, QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{1: ZZ(2)}, 1:{0:ZZ(1)}}, (2, 2), ZZ)
>>> A.convert_to(QQ)
{0: {1: 2}, 1: {0: 1}}
"""
Kold = A.domain
if K == Kold:
return A.copy()
Ak = unop_dict(A, lambda e: K.convert_from(e, Kold))
return A.new(Ak, A.shape, K)
def rref(A):
"""
Returns reduced-row echelon form and list of pivots for the :py:class:`~.SDM`
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(2), 1:QQ(4)}}, (2, 2), QQ)
>>> A.rref()
({0: {0: 1, 1: 2}}, [0])
"""
B, pivots, _ = sdm_irref(A)
return A.new(B, A.shape, A.domain), pivots
def inv(A):
"""
Returns inverse of a matrix A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.inv()
{0: {0: -2, 1: 1}, 1: {0: 3/2, 1: -1/2}}
"""
return A.from_ddm(A.to_ddm().inv())
def det(A):
"""
Returns determinant of A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.det()
-2
"""
return A.to_ddm().det()
def lu(A):
"""
Returns LU decomposition for a matrix A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.lu()
({0: {0: 1}, 1: {0: 3, 1: 1}}, {0: {0: 1, 1: 2}, 1: {1: -2}}, [])
"""
L, U, swaps = A.to_ddm().lu()
return A.from_ddm(L), A.from_ddm(U), swaps
def lu_solve(A, b):
"""
        Uses LU decomposition to solve Ax = b.
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> b = SDM({0:{0:QQ(1)}, 1:{0:QQ(2)}}, (2, 1), QQ)
>>> A.lu_solve(b)
{1: {0: 1/2}}
"""
return A.from_ddm(A.to_ddm().lu_solve(b.to_ddm()))
def nullspace(A):
"""
Returns nullspace for a :py:class:`~.SDM` matrix A
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0: QQ(2), 1: QQ(4)}}, (2, 2), QQ)
>>> A.nullspace()
({0: {0: -2, 1: 1}}, [1])
"""
ncols = A.shape[1]
one = A.domain.one
B, pivots, nzcols = sdm_irref(A)
K, nonpivots = sdm_nullspace_from_rref(B, one, ncols, pivots, nzcols)
K = dict(enumerate(K))
shape = (len(K), ncols)
return A.new(K, shape, A.domain), nonpivots
def particular(A):
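        # Returns a particular solution of the augmented system represented by A
        # (its last column acting as the right-hand side), read off from the RREF of A.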
ncols = A.shape[1]
B, pivots, nzcols = sdm_irref(A)
P = sdm_particular_from_rref(B, ncols, pivots)
rep = {0:P} if P else {}
return A.new(rep, (1, ncols-1), A.domain)
def hstack(A, *B):
"""
        Horizontally stacks :py:class:`~.SDM` matrices A and B
Examples
========
>>> from sympy import QQ
>>> from sympy.polys.matrices.sdm import SDM
>>> B = SDM({0:{0:QQ(1)}, 1:{0:QQ(2)}}, (2, 1), QQ)
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.hstack(B)
{0: {0: 1, 1: 2, 2: 1}, 1: {0: 3, 1: 4, 2: 2}}
"""
Anew = dict(A.copy())
rows, cols = A.shape
domain = A.domain
for Bk in B:
Bkrows, Bkcols = Bk.shape
assert Bkrows == rows
assert Bk.domain == domain
for i, Bki in Bk.items():
Ai = Anew.get(i, None)
if Ai is None:
Anew[i] = Ai = {}
for j, Bkij in Bki.items():
Ai[j + cols] = Bkij
cols += Bkcols
return A.new(Anew, (rows, cols), A.domain)
def vstack(A, *B):
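        # Vertically stacks A with the matrices in B (which must share A's column count
        # and domain); the vertical analogue of hstack above.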
Anew = dict(A.copy())
rows, cols = A.shape
domain = A.domain
for Bk in B:
Bkrows, Bkcols = Bk.shape
assert Bkcols == cols
assert Bk.domain == domain
for i, Bki in Bk.items():
Anew[i + rows] = Bki
rows += Bkrows
return A.new(Anew, (rows, cols), A.domain)
def applyfunc(self, func, domain):
sdm = {i: {j: func(e) for j, e in row.items()} for i, row in self.items()}
return self.new(sdm, self.shape, domain)
def charpoly(A):
"""
Returns the coefficients of the characteristic polynomial
        of the :py:class:`~.SDM` matrix. These elements will be domain elements.
        The domain of the elements will be the same as the domain of the :py:class:`~.SDM`.
Examples
========
>>> from sympy import QQ, Symbol
>>> from sympy.polys.matrices.sdm import SDM
>>> from sympy.polys import Poly
>>> A = SDM({0:{0:QQ(1), 1:QQ(2)}, 1:{0:QQ(3), 1:QQ(4)}}, (2, 2), QQ)
>>> A.charpoly()
[1, -5, -2]
We can create a polynomial using the
coefficients using :py:class:`~.Poly`
>>> x = Symbol('x')
>>> p = Poly(A.charpoly(), x, domain=A.domain)
>>> p
Poly(x**2 - 5*x - 2, x, domain='QQ')
"""
return A.to_ddm().charpoly()
def binop_dict(A, B, fab, fa, fb):
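    # Elementwise combination of two dict-of-dicts matrices: fab is applied where both
    # A and B store an entry, fa where only A does, and fb where only B does.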
Anz, Bnz = set(A), set(B)
C = {}
for i in Anz & Bnz:
Ai, Bi = A[i], B[i]
Ci = {}
Anzi, Bnzi = set(Ai), set(Bi)
for j in Anzi & Bnzi:
elem = fab(Ai[j], Bi[j])
if elem:
Ci[j] = elem
for j in Anzi - Bnzi:
Ci[j] = fa(Ai[j])
for j in Bnzi - Anzi:
Ci[j] = fb(Bi[j])
if Ci:
C[i] = Ci
for i in Anz - Bnz:
Ai = A[i]
C[i] = {j: fa(Aij) for j, Aij in Ai.items()}
for i in Bnz - Anz:
Bi = B[i]
C[i] = {j: fb(Bij) for j, Bij in Bi.items()}
return C
def unop_dict(A, f):
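    # Apply f to every stored entry of A, dropping any result that is zero.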
B = {}
for i, Ai in A.items():
Bi = {}
for j, Aij in Ai.items():
Bij = f(Aij)
if Bij:
Bi[j] = Bij
if Bi:
B[i] = Bi
return B
def sdm_transpose(M):
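    # Transpose a dict-of-dicts matrix by swapping its row and column keys.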
MT = {}
for i, Mi in M.items():
for j, Mij in Mi.items():
try:
MT[j][i] = Mij
except KeyError:
MT[j] = {i: Mij}
return MT
def sdm_matmul(A, B):
#
# Should be fast if A and B are very sparse.
# Consider e.g. A = B = eye(1000).
#
# The idea here is that we compute C = A*B in terms of the rows of C and
# B since the dict of dicts representation naturally stores the matrix as
# rows. The ith row of C (Ci) is equal to the sum of Aik * Bk where Bk is
# the kth row of B. The algorithm below loops over each nonzero element
    # Aik of A and if the corresponding row Bk is nonzero then we do
# Ci += Aik * Bk.
# To make this more efficient we don't need to loop over all elements Aik.
# Instead for each row Ai we compute the intersection of the nonzero
# columns in Ai with the nonzero rows in B. That gives the k such that
# Aik and Bk are both nonzero. In Python the intersection of two sets
# of int can be computed very efficiently.
#
C = {}
B_knz = set(B)
for i, Ai in A.items():
Ci = {}
Ai_knz = set(Ai)
for k in Ai_knz & B_knz:
Aik = Ai[k]
for j, Bkj in B[k].items():
Cij = Ci.get(j, None)
if Cij is not None:
Cij = Cij + Aik * Bkj
if Cij:
Ci[j] = Cij
                    else:
                        Ci.pop(j)
                else:
                    Cij = Aik * Bkj
                    if Cij:
                        Ci[j] = Cij
        if Ci:
            C[i] = Ci
    return C
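# Editor's sketch: sdm_matmul on plain dict-of-dicts inputs, illustrating the
# row/column-intersection idea described in the comments above. Values are
# made up for illustration.
def _example_sdm_matmul():
    A = {0: {0: 1, 2: 2}, 1: {1: 3}}          # 2x3 sparse matrix
    B = {0: {0: 4}, 2: {1: 5}}                # 3x2 sparse matrix (row 1 empty)
    # Row 0 of A meets rows {0, 2} of B; row 1 of A meets nothing, so it is
    # absent from the product.
    assert sdm_matmul(A, B) == {0: {0: 4, 1: 10}}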
# Repository: ecmwf/pyeccodes
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
discipline = h.get_l('discipline')
parameterCategory = h.get_l('parameterCategory')
parameterNumber = h.get_l('parameterNumber')
instrumentType = h.get_l('instrumentType')
satelliteSeries = h.get_l('satelliteSeries')
scaledValueOfCentralWaveNumber = h.get_l('scaledValueOfCentralWaveNumber')
satelliteNumber = h.get_l('satelliteNumber')
typeOfGeneratingProcess = h.get_l('typeOfGeneratingProcess')
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 74626 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 83333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 2 and scaledValueOfCentralWaveNumber == 92592 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation brightness temperature'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 1250000 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 1666666 and satelliteNumber == 72 and typeOfGeneratingProcess == 8:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 625000:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
if discipline == 3 and parameterCategory == 0 and parameterNumber == 1 and scaledValueOfCentralWaveNumber == 2000000 and satelliteNumber == 72 and typeOfGeneratingProcess == 8 and instrumentType == 207 and satelliteSeries == 333:
return 'Obser. Sat. Meteosat sec. generation Albedo (scaled)'
scaledValueOfFirstFixedSurface = h.get_l('scaledValueOfFirstFixedSurface')
typeOfFirstFixedSurface = h.get_l('typeOfFirstFixedSurface')
scaleFactorOfFirstFixedSurface = h.get_l('scaleFactorOfFirstFixedSurface')
typeOfStatisticalProcessing = h.get_l('typeOfStatisticalProcessing')
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 198 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2:
return 'calibrated forecast, wind speed (gust)'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 198 and typeOfStatisticalProcessing == 1:
return 'calibrated forecast, large-scale snowfall rate w.e.'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 198 and typeOfStatisticalProcessing == 1:
return 'calibrated forecast, total precipitation rate'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 22 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2 and scaledValueOfFirstFixedSurface == 10:
return 'smoothed forecast, wind speed (gust)'
if discipline == 0 and parameterCategory == 3 and parameterNumber == 18 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and typeOfFirstFixedSurface == 106:
return 'smoothed forecast, soil temperature'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 56 and typeOfGeneratingProcess == 197 and typeOfStatisticalProcessing == 1 and typeOfFirstFixedSurface == 1:
return 'smoothed forecast, large-scale snowfall rate w.e.'
typeOfSecondFixedSurface = h.get_l('typeOfSecondFixedSurface')
scaledValueOfSecondFixedSurface = h.get_l('scaledValueOfSecondFixedSurface')
scaleFactorOfSecondFixedSurface = h.get_l('scaleFactorOfSecondFixedSurface')
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfFirstFixedSurface == 0 and scaledValueOfSecondFixedSurface == 400 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaleFactorOfSecondFixedSurface == -2:
return 'smoothed forecast, cloud cover high'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 100 and scaledValueOfSecondFixedSurface == 800 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaleFactorOfSecondFixedSurface == -2 and scaledValueOfFirstFixedSurface == 400:
return 'smoothed forecast, cloud cover medium'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 22 and typeOfFirstFixedSurface == 100 and typeOfSecondFixedSurface == 1 and scaleFactorOfFirstFixedSurface == -2 and typeOfGeneratingProcess == 197 and scaledValueOfFirstFixedSurface == 800:
return 'smoothed forecast, cloud cover low'
if discipline == 0 and parameterCategory == 6 and parameterNumber == 1 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 197:
return 'smoothed forecast, total cloud cover'
if discipline == 0 and parameterCategory == 1 and parameterNumber == 52 and typeOfFirstFixedSurface == 1 and typeOfGeneratingProcess == 197 and typeOfStatisticalProcessing == 1:
return 'smoothed forecast, total precipitation rate'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 3 and scaledValueOfFirstFixedSurface == 10 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0:
return 'smoothed forecast, v comp. of wind'
if discipline == 0 and parameterCategory == 2 and parameterNumber == 2 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 10:
return 'smoothed forecast, u comp. of wind'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 6 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, dew point temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 3 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, minimum temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0 and typeOfStatisticalProcessing == 2 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103:
return 'smoothed forecast, maximum temp.'
if discipline == 0 and parameterCategory == 0 and parameterNumber == 0 and scaledValueOfFirstFixedSurface == 2 and typeOfFirstFixedSurface == 103 and typeOfGeneratingProcess == 197 and scaleFactorOfFirstFixedSurface == 0:
return 'smoothed forecast, temperature'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 136986 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 161290 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 103092 and satelliteNumber == 72 and instrumentType == 207 and satelliteSeries == 333:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and instrumentType == 207 and satelliteSeries == 333 and scaledValueOfCentralWaveNumber == 114942 and satelliteNumber == 72:
return 'Synth. Sat. radiance clear sky'
if discipline == 3 and parameterCategory == 1 and parameterNumber == 17 and scaledValueOfCentralWaveNumber == 256410 and satelliteNumber ==
"`vision.data` manages data input pipeline - folders -> transform -> batch input. Includes support for classification, segmentation and bounding boxes"
from ..torch_core import *
from .image import *
from .transform import *
from ..data_block import *
from ..data_block import _df_to_fns_labels
from ..basic_data import *
from ..layers import CrossEntropyFlat
from concurrent.futures import ProcessPoolExecutor, as_completed
import PIL
__all__ = ['get_image_files', 'DatasetTfm', 'ImageClassificationDataset', 'ImageMultiDataset', 'ObjectDetectDataset',
'SegmentationDataset', 'ImageClassificationBase', 'denormalize', 'get_annotations', 'ImageDataBunch', 'ImageFileList', 'normalize',
'normalize_funcs', 'show_image_batch', 'transform_datasets', 'SplitDatasetsImage', 'channel_view',
'mnist_stats', 'cifar_stats', 'imagenet_stats', 'download_images', 'verify_images', 'bb_pad_collate']
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
def get_image_files(c:PathOrStr, check_ext:bool=True, recurse=False)->FilePathList:
"Return list of files in `c` that are images. `check_ext` will filter to `image_extensions`."
return get_files(c, extensions=image_extensions, recurse=recurse)
def get_annotations(fname, prefix=None):
"Open a COCO-style json in `fname` and return the lists of filenames (with maybe `prefix`) and labelled bboxes."
annot_dict = json.load(open(fname))
id2images, id2bboxes, id2cats = {}, collections.defaultdict(list), collections.defaultdict(list)
classes = {}
for o in annot_dict['categories']:
classes[o['id']] = o['name']
for o in annot_dict['annotations']:
bb = o['bbox']
id2bboxes[o['image_id']].append([bb[1],bb[0], bb[3]+bb[1], bb[2]+bb[0]])
id2cats[o['image_id']].append(classes[o['category_id']])
for o in annot_dict['images']:
if o['id'] in id2bboxes:
id2images[o['id']] = ifnone(prefix, '') + o['file_name']
ids = list(id2images.keys())
return [id2images[k] for k in ids], [[id2bboxes[k], id2cats[k]] for k in ids]
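# Editor's sketch: the COCO-style layout get_annotations expects, with made-up
# ids and a single box. COCO stores boxes as [x, y, width, height]; the code
# above re-orders them to [y_min, x_min, y_max, x_max]. The file name passed
# in is illustrative.
def _example_get_annotations(tmp_json_path='coco_sample.json'):
    import json
    sample = {
        'categories': [{'id': 1, 'name': 'cat'}],
        'annotations': [{'image_id': 7, 'category_id': 1, 'bbox': [10, 20, 30, 40]}],
        'images': [{'id': 7, 'file_name': 'img_007.jpg'}],
    }
    with open(tmp_json_path, 'w') as f:
        json.dump(sample, f)
    fnames, lbl_bboxes = get_annotations(tmp_json_path, prefix='train/')
    # fnames == ['train/img_007.jpg']
    # lbl_bboxes == [[[[20, 10, 60, 40]], ['cat']]]
    return fnames, lbl_bboxes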
def show_image_batch(dl:DataLoader, classes:Collection[str], rows:int=None, figsize:Tuple[int,int]=(9,10))->None:
"Show a few images from a batch."
b_idx = next(iter(dl.batch_sampler))
if rows is None: rows = int(math.sqrt(len(b_idx)))
fig, axs = plt.subplots(rows,rows,figsize=figsize)
for i, ax in zip(b_idx[:rows*rows], axs.flatten()):
x,y = dl.dataset[i]
x.show(ax=ax, y=y, classes=classes)
plt.tight_layout()
class SplitDatasetsImage(SplitDatasets):
def transform(self, tfms:TfmList, **kwargs)->'SplitDatasets':
"Apply `tfms` to the underlying datasets, `kwargs` are passed to `DatasetTfm`."
assert not isinstance(self.train_ds, DatasetTfm)
self.train_ds = DatasetTfm(self.train_ds, tfms[0], **kwargs)
self.valid_ds = DatasetTfm(self.valid_ds, tfms[1], **kwargs)
if self.test_ds is not None:
self.test_ds = DatasetTfm(self.test_ds, tfms[1], **kwargs)
return self
def databunch(self, path:PathOrStr=None, **kwargs)->'ImageDataBunch':
"Create an `ImageDataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `ImageDataBunch.create`."
path = Path(ifnone(path, self.path))
return ImageDataBunch.create(*self.datasets, path=path, **kwargs)
class ImageClassificationBase(LabelDataset):
__splits_class__ = SplitDatasetsImage
def __init__(self, fns:FilePathList, classes:Optional[Collection[Any]]=None):
super().__init__(classes=classes)
self.x = np.array(fns)
self.image_opener = open_image
def _get_x(self,i): return self.image_opener(self.x[i])
def new(self, *args, classes:Optional[Collection[Any]]=None, **kwargs):
if classes is None: classes = self.classes
res = self.__class__(*args, classes=classes, **kwargs)
return res
class ImageClassificationDataset(ImageClassificationBase):
"`Dataset` for folders of images in style {folder}/{class}/{images}."
def __init__(self, fns:FilePathList, labels:ImgLabels, classes:Optional[Collection[Any]]=None):
if classes is None: classes = uniqueify(labels)
super().__init__(fns, classes)
self.y = np.array([self.class2idx[o] for o in labels], dtype=np.int64)
self.loss_func = F.cross_entropy
@staticmethod
def _folder_files(folder:Path, label:ImgLabel, extensions:Collection[str]=image_extensions)->Tuple[FilePathList,ImgLabels]:
"From `folder` return image files and labels. The labels are all `label`. Only keep files with suffix in `extensions`."
fnames = get_files(folder, extensions=extensions)
return fnames,[label]*len(fnames)
@classmethod
def from_single_folder(cls, folder:PathOrStr, classes:Collection[Any], extensions:Collection[str]=image_extensions):
"Typically used for test set. Label all images in `folder` with suffix in `extensions` with `classes[0]`."
fns,labels = cls._folder_files(folder, classes[0], extensions=extensions)
return cls(fns, labels, classes=classes)
@classmethod
def from_folder(cls, folder:Path, classes:Optional[Collection[Any]]=None, valid_pct:float=0.,
extensions:Collection[str]=image_extensions)->Union['ImageClassificationDataset', List['ImageClassificationDataset']]:
"Dataset of `classes` labeled images in `folder`. Optional `valid_pct` split validation set."
if classes is None: classes = [cls.name for cls in find_classes(folder)]
fns,labels,keep = [],[],{}
for cl in classes:
f,l = cls._folder_files(folder/cl, cl, extensions=extensions)
fns+=f; labels+=l
keep[cl] = len(f)
classes = [cl for cl in classes if keep[cl]]
if valid_pct==0.: return cls(fns, labels, classes=classes)
return [cls(*a, classes=classes) for a in random_split(valid_pct, fns, labels)]
class ImageMultiDataset(ImageClassificationBase):
def __init__(self, fns:FilePathList, labels:ImgLabels, classes:Optional[Collection[Any]]=None):
if classes is None: classes = uniqueify(np.concatenate(labels))
super().__init__(fns, classes)
self.y = [np.array([self.class2idx[o] for o in l], dtype=np.int64) for l in labels]
self.loss_func = F.binary_cross_entropy_with_logits
def encode(self, x:Collection[int]):
"One-hot encode the target."
res = np.zeros((self.c,), np.float32)
res[x] = 1.
return res
def get_labels(self, idx:int)->ImgLabels: return [self.classes[i] for i in self.y[idx]]
def _get_y(self,i): return self.encode(self.y[i])
@classmethod
def from_single_folder(cls, folder:PathOrStr, classes:Collection[Any], extensions=image_extensions):
"Typically used for test set; label all images in `folder` with `classes[0]`."
fnames = get_files(folder, extensions=extensions)
labels = [[classes[0]]] * len(fnames)
return cls(fnames, labels, classes=classes)
@classmethod
def from_folder(cls, path:PathOrStr, folder:PathOrStr, fns:pd.Series, labels:ImgLabels, valid_pct:float=0.2,
classes:Optional[Collection[Any]]=None):
path = Path(path)
folder_path = (path/folder).absolute()
train,valid = random_split(valid_pct, f'{folder_path}/' + fns, labels)
train_ds = cls(*train, classes=classes)
return [train_ds,cls(*valid, classes=train_ds.classes)]
class SegmentationDataset(ImageClassificationBase):
"A dataset for segmentation task."
def __init__(self, x:FilePathList, y:FilePathList, classes:Collection[Any]):
assert len(x)==len(y)
super().__init__(x, classes)
self.y = np.array(y)
self.loss_func = CrossEntropyFlat()
self.mask_opener = open_mask
def _get_y(self,i): return self.mask_opener(self.y[i])
class ObjectDetectDataset(ImageClassificationBase):
"A dataset with annotated images."
def __init__(self, x_fns:Collection[Path], labelled_bbs:Collection[Tuple[Collection[int], str]], classes:Collection[str]=None):
assert len(x_fns)==len(labelled_bbs)
if classes is None:
classes = set()
for lbl_bb in labelled_bbs: classes = classes.union(set(lbl_bb[1]))
classes = ['background'] + list(classes)
super().__init__(x_fns,classes)
self.labelled_bbs = labelled_bbs
def _get_y(self,i):
#TODO: find a smart way to not reopen the x image.
cats = LongTensor([self.class2idx[l] for l in self.labelled_bbs[i][1]])
return (ImageBBox.create(self.labelled_bbs[i][0], *self._get_x(i).size, cats))
@classmethod
def from_json(cls, folder, fname, valid_pct=None, classes=None):
"""Create an `ObjectDetectDataset` by looking at the images in `folder` according to annotations in the json `fname`.
If `valid_pct` is passed, split a training and validation set. `classes` is the list of classes."""
imgs, labelled_bbox = get_annotations(fname, prefix=f'{folder}/')
if valid_pct:
train,valid = random_split(valid_pct, imgs, labelled_bbox)
train_ds = cls(*train, classes=classes)
return train_ds, cls(*valid, classes=train_ds.classes)
return cls(imgs, labelled_bbox, classes=classes)
def bb_pad_collate(samples:BatchSamples, pad_idx:int=0) -> Tuple[FloatTensor, Tuple[LongTensor, LongTensor]]:
"Function that collect samples and adds padding."
max_len = max([len(s[1].data[1]) for s in samples])
bboxes = torch.zeros(len(samples), max_len, 4)
labels = torch.zeros(len(samples), max_len).long() + pad_idx
imgs = []
for i,s in enumerate(samples):
imgs.append(s[0].data[None])
bbs, lbls = s[1].data
bboxes[i,-len(lbls):] = bbs
labels[i,-len(lbls):] = lbls
return torch.cat(imgs,0), (bboxes,labels)
def _prep_tfm_kwargs(tfms, kwargs):
default_rsz = ResizeMethod.SQUISH if ('size' in kwargs and is_listy(kwargs['size'])) else ResizeMethod.CROP
    default_rsz = ResizeMethod.SQUISH if ('size' in kwargs and is_listy(kwargs['size'])) else ResizeMethod.CROP
    # kwargs is a dict, so use .get rather than getattr (getattr on a dict
    # would always fall back to the default).
    resize_method = kwargs.get('resize_method', default_rsz)
if resize_method <= 2: tfms = [crop_pad()] + tfms
kwargs['resize_method'] = resize_method
return tfms, kwargs
class DatasetTfm(Dataset):
"`Dataset` that applies a list of transforms to every item drawn."
def __init__(self, ds:Dataset, tfms:TfmList=None, tfm_y:bool=False, **kwargs:Any):
"this dataset will apply `tfms` to `ds`"
self.ds,self.tfm_y = ds,tfm_y
self.tfms,self.kwargs = _prep_tfm_kwargs(tfms,kwargs)
self.y_kwargs = {**self.kwargs, 'do_resolve':False}
def __len__(self)->int: return len(self.ds)
def __repr__(self)->str: return f'{self.__class__.__name__}({self.ds})'
def __getitem__(self,idx:int)->Tuple[ItemBase,Any]:
"Return tfms(x),y."
x,y = self.ds[idx]
x = apply_tfms(self.tfms, x, **self.kwargs)
if self.tfm_y: y = apply_tfms(self.tfms, y, **self.y_kwargs)
return x, y
def __getattr__(self,k):
"Passthrough access to wrapped dataset attributes."
return getattr(self.ds, k)
def _transform_dataset(self, tfms:TfmList=None, tfm_y:bool=False, **kwargs:Any)->DatasetTfm:
return DatasetTfm(self, tfms=tfms, tfm_y=tfm_y, **kwargs)
DatasetBase.transform = _transform_dataset
def transform_datasets(train_ds:Dataset, valid_ds:Dataset, test_ds:Optional[Dataset]=None,
tfms:Optional[Tuple[TfmList,TfmList]]=None, resize_method:ResizeMethod=None, **kwargs:Any):
"Create train, valid and maybe test DatasetTfm` using `tfms` = (train_tfms,valid_tfms)."
tfms = ifnone(tfms, [[],[]])
res = [DatasetTfm(train_ds, tfms[0], resize_method=resize_method, **kwargs),
DatasetTfm(valid_ds, tfms[1], resize_method=resize_method, **kwargs)]
if test_ds is not None: res.append(DatasetTfm(test_ds, tfms[1], resize_method=resize_method, **kwargs))
return res
def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
"Normalize `x` with `mean` and `std`."
return (x-mean[...,None,None]) / std[...,None,None]
def denormalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage:
"Denormalize `x` with `mean` and `std`."
return x*std[...,None,None] + mean[...,None,None]
def _normalize_batch(b:Tuple[Tensor,Tensor], mean:FloatTensor, std:FloatTensor, do_y:bool=False)->Tuple[Tensor,Tensor]:
"`b` = `x`,`y` - normalize `x` array of imgs and `do_y` optionally `y`."
x,y = b
mean,std = mean.to(x.device),std.to(x.device)
x = normalize(x,mean,std)
if do_y: y = normalize(y,mean,std)
return x,y
def normalize_funcs(mean:FloatTensor, std:FloatTensor)->Tuple[Callable,Callable]:
"Create normalize/denormalize func using `mean` and `std`, can specify `do_y` and `device`."
mean,std = tensor(mean),tensor(std)
return (partial(_normalize_batch, mean=mean, std=std),
partial(denormalize, mean=mean, std=std))
cifar_stats = ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261])
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
mnist_stats = ([0.15]*3, [0.15]*3)
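# Editor's sketch (not part of the library): normalize/denormalize are inverses
# for any stats pair; here checked with the imagenet_stats defined above on a
# random fake image.
def _example_normalize_roundtrip():
    import torch
    x = torch.rand(3, 8, 8)
    mean, std = torch.tensor(imagenet_stats[0]), torch.tensor(imagenet_stats[1])
    x_back = denormalize(normalize(x, mean, std), mean, std)
    assert torch.allclose(x, x_back, atol=1e-6)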
def channel_view(x:Tensor)->Tensor:
"Make channel the first axis of `x` and flatten remaining axes"
return x.transpose(0,1).contiguous().view(x.shape[1],-1)
def _get_fns(ds, path):
"List of all file names relative to `path`."
return [str(fn.relative_to(path)) for fn in ds.x]
class ImageDataBunch(DataBunch):
@classmethod
def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.', bs:int=64, ds_tfms:Optional[TfmList]=None,
num_workers:int=defaults.cpus, tfms:Optional[Collection[Callable]]=None, device:torch.device=None,
collate_fn:Callable=data_collate, size:int=None, **kwargs)->'ImageDataBunch':
"Factory method. `bs` batch size, `ds_tfms` for `Dataset`, `tfms` for `DataLoader`."
datasets = [train_ds,valid_ds]
if test_ds is not None: datasets.append(test_ds)
if ds_tfms or size: datasets = transform_datasets(*datasets, tfms=ds_tfms, size=size, **kwargs)
dls = [DataLoader(*o, num_workers=num_workers) for o in
zip(datasets, (bs,bs*2,bs*2), (True,False,False))]
return cls(*dls, path=path, device=device, tfms=tfms, collate_fn=collate_fn)
@classmethod
def from_folder(cls, path:PathOrStr, train:PathOrStr='train', valid:PathOrStr='valid',
test:Optional[PathOrStr]=None, valid_pct=None, **kwargs:Any)->'ImageDataBunch':
"Create from imagenet style dataset in `path` with `train`,`valid`,`test` subfolders (or provide `valid_pct`)."
path=Path(path)
if valid_pct is None:
train_ds = ImageClassificationDataset.from_folder(path/train)
datasets = [train_ds, ImageClassificationDataset.from_folder(path/valid, classes=train_ds.classes)]
else: datasets = ImageClassificationDataset.from_folder(path/train, valid_pct=valid_pct)
if test: datasets.append(ImageClassificationDataset.from_single_folder(
path/test,classes=datasets[0].classes))
return cls.create(*datasets, path=path, **kwargs)
@classmethod
def from_df(cls, path:PathOrStr, df:pd.DataFrame, folder:PathOrStr='.', sep=None, valid_pct:float=0.2,
fn_col:int=0, label_col:int=1, test:Optional[PathOrStr]=None, suffix:str=None, **kwargs:Any)->'ImageDataBunch':
"Create from a DataFrame."
path = Path(path)
fnames, labels = _df_to_fns_labels(df, suffix=suffix, label_delim=sep, fn_col=fn_col, label_col=label_col)
if sep:
classes = uniqueify(np.concatenate(labels))
datasets = ImageMultiDataset.from_folder(path, folder, fnames, labels, valid_pct=valid_pct, classes=classes)
if test: datasets.append(ImageMultiDataset.from_single_folder(path/test, classes=datasets[0].classes))
else:
folder_path = (path/folder).absolute()
(train_fns,train_lbls), (valid_fns,valid_lbls) = random_split(valid_pct, f'{folder_path}/' + fnames, labels)
classes = uniqueify(labels)
datasets = [ImageClassificationDataset(train_fns, train_lbls, classes)]
datasets.append(ImageClassificationDataset(valid_fns, valid_lbls, classes))
            if test: datasets.append(ImageClassificationDataset.from_single_folder(
                path/test, classes=datasets[0].classes))
        return cls.create(*datasets, path=path, **kwargs)
# pylint: disable=invalid-name
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
"""
Class for managing integrating spheres.
import iadpython.sphere
s = iadpython.sphere.Sphere(250,20)
print(s)
"""
import numpy as np
class Sphere():
"""Container class for an integrating sphere."""
def __init__(self, d_sphere, d_sample, d_entrance=0, d_detector=0, r_detector=0, r_wall=1):
"""Object initialization."""
self._d_sphere = d_sphere
self._d_sample = d_sample
self._d_entrance = d_entrance
self._d_detector = d_detector
self.a_sample = self.relative_cap_area(d_sample)
self.a_detector = self.relative_cap_area(d_detector)
self.a_entrance = self.relative_cap_area(d_entrance)
self._a_wall = 1 - self.a_sample - self.a_entrance - self.a_detector
self.r_detector = r_detector
self.r_wall = r_wall
def cap_area(self, d_port):
"""Calculate area of spherical cap."""
R = self.d_sphere / 2
r = d_port / 2
h = R - np.sqrt(R**2 - r**2)
return 2 * np.pi * R * h
def approx_relative_cap_area(self, d_port):
"""Calculate approx relative area of spherical cap."""
R = self.d_sphere / 2
r = d_port / 2
return r**2 / (4 * R**2)
def relative_cap_area(self, d_port):
"""Calculate relative area of spherical cap."""
# R = self.d_sphere/2
# r = d_port/2
# h = R - np.sqrt(R**2-r**2)
# return 2*np.pi*R*h / (4*np.pi*R**2)
h = (self.d_sphere - np.sqrt(self.d_sphere**2 - d_port**2)) / 2
return h / self.d_sphere
def __str__(self):
"""Return basic details as a string for printing."""
s = ""
s += "Sphere diameter = %.1f mm\n" % self._d_sphere
s += "Port diameters \n"
s += " sample = %.1f mm\n" % self._d_sample
s += " entrance = %.1f mm\n" % self._d_entrance
s += " detector = %.1f mm\n" % self._d_detector
s += "Diffuse reflectivities \n"
s += " wall = %.1f%%\n" % self.r_wall
s += " detector = %.1f%%\n" % self.r_detector
return s
def gain(self, URU, r_wall=None):
"""
Determine the gain for this integrating sphere.
The gain G(URU) on the irradiance on the detector (relative
to a perfectly black sphere) is
G(URU) = (P_d /A_d) / (P/A)
See sphere.ipynb for details on the derivation.
Args:
URU: total reflectance for diffuse illumination
r_wall: fractional wall reflectivity
Returns:
gain on detector caused by multiple bounces in sphere
"""
if r_wall is None:
r_wall = self.r_wall
tmp = self.a_detector * self.r_detector + self.a_sample * URU
tmp = r_wall * (self._a_wall + (1 - self.a_entrance) * tmp)
if tmp == 1.0:
G = r_wall
else:
G = r_wall * (1.0 + tmp / (1.0 - tmp))
return G
def multiplier(self, UR1=None, URU=None, r_wall=None):
"""
Determine the average reflectance of a sphere.
The idea here is that UR1 is the reflection of the incident light
for the first bounce. Three cases come to mind
1. If the light hits the sample first, then UR1 should be
the sample reflectance for collimated illumination.
2. If light hits the sphere wall first, then UR1 should be the wall
reflectance.
3. If light enters the sphere completely diffuse then UR1=1
As defined by LabSphere, "Technical Guide: integrating Sphere Theory
and application" using equation 14
Args:
UR1: sample reflectance for normal irradiance
URU: sample reflectance for diffuse irradiance
r_wall: wall reflectance
Returns:
sphere multiplier
"""
if r_wall is None:
r_wall = self.r_wall
if UR1 is None:
UR1 = r_wall
if URU is None:
URU = UR1
denom = 1
denom -= self._a_wall * r_wall
denom -= self.a_sample * URU
denom -= self.a_detector * self.r_detector
return UR1 / denom
@property
def d_sphere(self):
"""Getter property for sphere diameter."""
return self._d_sphere
@d_sphere.setter
def d_sphere(self, value):
"""When size is changed ratios become invalid."""
assert self.d_sample <= value, "sphere must be bigger than sample port"
assert self.d_entrance <= value, "sphere must be bigger than entrance port"
assert self.d_detector <= value, "sphere must be bigger than detector port"
self._d_sphere = value
self.a_sample = self.relative_cap_area(self._d_sample)
self.a_detector = self.relative_cap_area(self._d_detector)
self.a_entrance = self.relative_cap_area(self._d_entrance)
self._a_wall = 1 - self.a_sample - self.a_entrance - self.a_detector
@property
def d_sample(self):
"""Getter property for sample port diameter."""
return self._d_sample
@d_sample.setter
def d_sample(self, value):
"""When size is changed ratios become invalid."""
assert 0 <= value <= self._d_sphere, "sample port must be between 0 and sphere diameter"
self._d_sample = value
self.a_sample = self.relative_cap_area(value)
self._a_wall = 1 - self.a_sample - self.a_entrance - self.a_detector
@property
def d_entrance(self):
"""Getter property for entrance port diameter."""
return self._d_entrance
@d_entrance.setter
def d_entrance(self, value):
"""When size is changed ratios become invalid."""
assert 0 <= value <= self._d_sphere, "entrance port must be between 0 and sphere diameter"
self._d_entrance = value
self.a_entrance = self.relative_cap_area(value)
self._a_wall = 1 - self.a_sample - self.a_entrance - self.a_detector
@property
def d_detector(self):
"""Getter property for detector port diameter."""
return self._d_detector
@d_detector.setter
def d_detector(self, value):
"""When size is changed ratios become invalid."""
assert 0 <= value <= self._d_sphere, "detector port must be between 0 and sphere diameter"
self._d_detector = value
self.a_detector = self.relative_cap_area(value)
self._a_wall = 1 - self.a_sample - self.a_entrance - self.a_detector
@property
def a_wall(self):
"""Getter property for detector port diameter."""
return self._a_wall
@a_wall.setter
def a_wall(self, value):
"""Changing the relative wall area is a bit crazy."""
assert 0 <= value <= 1, "relative wall area must be between 0 and 1"
# Find the diameter of a spherical cap assuming all non-wall
# port area is assigned to a single sample port
assert 0 < value < 1, "0 < relative wall area < 1"
self._d_sample = 2 * self._d_sphere * np.sqrt(value - value**2)
self._d_entrance = 0
self._d_detector = 0
self.a_entrance = 0
self.a_detector = 0
self._a_wall = value
self.a_sample = 1 - value
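# Editor's sketch: exercising the Sphere class defined above. The port sizes
# and reflectances are made up; they only illustrate the relations between
# cap area, gain and multiplier.
def _example_sphere():
    s = Sphere(d_sphere=250, d_sample=20, d_entrance=10, d_detector=5,
               r_detector=0.5, r_wall=0.98)
    # For small ports the exact relative cap area is close to r^2/(4 R^2).
    assert abs(s.relative_cap_area(20) - s.approx_relative_cap_area(20)) < 1e-4
    g = s.gain(URU=0.4)          # gain relative to a perfectly black sphere
    m = s.multiplier(UR1=0.5, URU=0.4)
    return g, m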
def Gain_11(RS, TS, URU, tdiffuse):
"""
Net gain on the detector in the reflection sphere for the two-sphere configuration.
The light on the detector in the reflectance sphere is affected by interactions
between the two spheres. This function calculates the net gain on a detector
in the reflection sphere for diffuse light starting in the reflectance sphere.
G₁₁ = (P₁/Ad) / (P/A)
then the full expression for the gain is
G(r_s)/(1-a_s a_s' r_w r_w' (1-a_e)(1-a_e') G(r_s) G'(r_s)t_s²)
"""
G = RS.gain(URU)
GP = TS.gain(URU)
areas = RS.a_sample * TS.a_sample * (1 - RS.a_entrance) * (1 - TS.a_entrance)
G11 = G / (1 - areas * RS.r_wall * TS.r_wall * G * GP * tdiffuse**2)
return G11
def Gain_22(RS, TS, URU, tdiffuse):
"""
Two sphere gain in T sphere for light starting in T sphere.
Similarly, when the light starts in the second sphere, the gain for light
on the detector in the second sphere $G_{22}$ is found by switching
all primed variables to unprimed. Thus $G_{22}(r_s,t_s)$ is
$$
G_{22}(r_s,t_s) = \frac{G'(r_s)}{1 - a_s a_s' r_w r_w' (1-a_e)(1-a_e') G(r_s) G'(r_s) t_s^2}
$$
"""
G = RS.gain(URU)
GP = TS.gain(URU)
areas = RS.a_sample * TS.a_sample * (1 - RS.a_entrance) * (1 - TS.a_entrance)
G22 = GP / (1 - areas * RS.r_wall * TS.r_wall * G * GP * tdiffuse**2)
return G22
def Two_Sphere_R(RS, TS, UR1, URU, UT1, UTU, f=0):
"""
Total gain in R sphere for two sphere configuration.
The light on the detector in the reflection sphere arises from three
sources: the fraction of light directly reflected off the sphere wall
f r_w² (1-a_e) P,
the fraction of light reflected by the sample
(1-f) rdirect r_w² (1-a_e) P,
and the light transmitted through the sample
(1-f) tdirect r_w' (1-a_e') P,
If we use the gain for each part then we add
G₁₁ * a_d (1-a_e) r_w² f P
to
G₁₁ * a_d (1-a_e) r_w (1-f) rdirect P
and
G₂₁ * a_d (1-a_e') r_w' (1-f) tdirect P
which simplifies slightly to the formula used below
"""
GP = TS.gain(URU)
G11 = Gain_11(RS, TS, URU, UTU)
x = RS.a_detector * (1 - RS.a_entrance) * RS.r_wall * G11
p1 = (1 - f) * UR1
p2 = RS.r_wall * f
p3 = (1 - f) * TS.a_sample * (1 - TS.a_entrance) * TS.r_wall * UT1 * UTU * GP
return x * (p1 + p2 + p3)
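# Editor's sketch: the two-sphere helpers above only need two Sphere objects
# plus sample reflectances/transmittances; all numbers here are illustrative,
# not measured values.
def _example_two_sphere():
    RS = Sphere(250, 20, d_entrance=10, d_detector=5, r_detector=0.5, r_wall=0.98)
    TS = Sphere(250, 20, d_entrance=10, d_detector=5, r_detector=0.5, r_wall=0.98)
    UR1, URU, UT1, UTU = 0.5, 0.4, 0.3, 0.25
    g11 = Gain_11(RS, TS, URU, UTU)
    g22 = Gain_22(RS, TS, URU, UTU)
    p_r = Two_Sphere_R(RS, TS, UR1, URU, UT1, UTU, f=0.1)
    return g11, g22, p_r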
def Two_Sphere_T(RS, TS, UR1, URU, UT1, UTU, f=0):
"""
Total gain in T sphere for two sphere configuration.
For the power on the detector in the transmission (second) sphere we
have the same three sources. The only difference is that the subscripts
on the gain terms are swapped.
dtype np.uint32
Image decoded as a 32 bit RGBA image.
"""
# Ensure it has three channels
if im.ndim != 3 or im.shape[2] !=3:
raise RuntimeError('Input image is not RGB.')
# Make sure all entries between zero and one
if (im < 0).any() or (im > 1).any():
raise RuntimeError('All pixel values must be between 0 and 1.')
# Get image shape
n, m, _ = im.shape
# Convert to 8-bit, which is expected for viewing
with warnings.catch_warnings():
warnings.simplefilter('ignore')
im_8 = skimage.img_as_ubyte(im)
# Add the alpha channel, which is expected by Bokeh
im_rgba = np.stack((*np.rollaxis(im_8, 2),
255*np.ones((n, m), dtype=np.uint8)), axis=2)
# Reshape into 32 bit. Must flip up/down for proper orientation
if flip:
return np.flipud(im_rgba.view(dtype=np.int32).reshape((n, m)))
else:
return im_rgba.view(dtype=np.int32).reshape((n, m))
def rgb_frac_to_hex(rgb_frac):
"""
Convert fractional RGB values to hexadecimal color string.
Parameters
----------
rgb_frac : array_like, shape (3,)
Fractional RGB values; each entry is between 0 and 1.
Returns
-------
str
Hexadecimal string for the given RGB color.
Examples
--------
>>> rgb_frac_to_hex((0.65, 0.23, 1.0))
'#a53aff'
>>> rgb_frac_to_hex((1.0, 1.0, 1.0))
'#ffffff'
"""
if len(rgb_frac) != 3:
raise RuntimeError('`rgb_frac` must have exactly three entries.')
if (np.array(rgb_frac) < 0).any() or (np.array(rgb_frac) > 1).any():
raise RuntimeError('RGB values must be between 0 and 1.')
return '#{0:02x}{1:02x}{2:02x}'.format(int(rgb_frac[0] * 255),
int(rgb_frac[1] * 255),
int(rgb_frac[2] * 255))
def corner(trace, vars=None, labels=None, datashade=True, plot_width=150,
smooth=1, bins=20, cmap='black', contour_color='black',
hist_color='black', alpha=1, bins_2d=50, plot_ecdf=False,
plot_width_correction=50, plot_height_correction=40, levels=None,
weights=None, show_contours=True, extend_contour_domain=False):
"""
Make a corner plot of MCMC results. Heavily influenced by the corner
package by Dan Foreman-Mackey.
Parameters
----------
trace : PyMC3 Trace or MultiTrace instance or Pandas DataFrame
Trace of MCMC sampler.
vars : list
List of variables as strings included in `trace` to construct
corner plot.
labels : list, default None
List of labels for the respective variables given in `vars`. If
None, the variable names from `vars` are used.
datashade : bool, default True
Whether or not to convert sampled points to a raster image using
Datashader. For almost all applications, this should be true.
Otherwise, you will try to render thousands and thousands of
points.
plot_width : int, default 150
Width of each plot in the corner plot in pixels. The height is
computed from the width to make the plots roughly square.
smooth : int or None, default 1
Width of smoothing kernel for making contours.
bins : int, default 20
Number of bins to use in constructing histograms. Ignored if
`plot_ecdf` is True.
cmap : str, default 'black'
Valid colormap string for DataShader and for coloring Bokeh
glyphs.
contour_color : str, default 'black'
Color of contour lines
hist_color : str, default 'black'
Color of histogram lines
alpha : float, default 1.0
Opacity of glyphs. Ignored if `datashade` is True.
bins_2d : int, default 50
Number of bins in each direction for binning 2D histograms when
computing contours
plot_ecdf : bool, default False
If True, plot ECDFs of samples on the diagonal of the corner
plot. If False, histograms are plotted.
plot_width_correction : int, default 50
Correction for width of plot taking into account tick and axis
labels.
plot_height_correction : int, default 40
Correction for height of plot taking into account tick and axis
labels.
levels : list of floats, default None
Levels to use when constructing contours. By default, these are
chosen according to this principle from Dan Foreman-Mackey:
http://corner.readthedocs.io/en/latest/pages/sigmas.html
weights : default None
Value to pass as `weights` kwarg to np.histogram2d().
show_contours : bool, default True
If True, show contour plot on top of samples.
extend_contour_domain : bool, default False
If True, extend the domain of the contours a little bit beyond
the extent of the samples. This is done in the corner module,
but I prefer not to do it.
Returns
-------
output : Bokeh gridplot
Corner plot as a Bokeh gridplot.
"""
if vars is None:
raise RuntimeError('Must specify vars.')
if type(vars) not in (list, tuple):
raise RuntimeError('`vars` must be a list or tuple.')
if type(trace) == pd.core.frame.DataFrame:
df = trace
else:
df = pm.trace_to_dataframe(trace)
if len(vars) > 6:
raise RuntimeError(
'For space purposes, can show only six variables.')
for col in vars:
if col not in df.columns:
raise RuntimeError(
'Column ' + col + ' not in the columns of DataFrame.')
if labels is None:
labels = vars
elif len(labels) != len(vars):
raise RuntimeError('len(vars) must equal len(labels)')
if len(vars) == 1:
x = vars[0]
if plot_ecdf:
if datashade:
if plot_width == 150:
plot_height = 200
plot_width = 300
else:
plot_width = 200
plot_height=200
x_range, _ = _data_range(df, vars[0], vars[0])
p = bokeh.plotting.figure(
x_range=x_range, y_range=[-0.02, 1.02],
plot_width=plot_width, plot_height=plot_height)
x_ecdf, y_ecdf = _ecdf_vals(df[vars[0]], formal=True)
df_ecdf = pd.DataFrame(data={vars[0]: x_ecdf, 'ECDF': y_ecdf})
_ = datashader.bokeh_ext.InteractiveImage(
p, _create_line_image, df=df_ecdf,
x=x, y='ECDF', cmap=hist_color)
else:
return ecdf(df[vars[0]], formal=True,
line_width=2, line_color=hist_color)
else:
return histogram(df[vars[0]],
bins=bins,
density=True,
line_width=2,
color=hist_color,
x_axis_label=vars[0])
if not datashade:
if len(df) > 10000:
raise RuntimeError(
'Cannot render more than 10,000 samples without DataShader.')
elif len(df) > 1000:
warnings.warn(
'Rendering so many points without DataShader is ill-advised.')
plots = [[None for _ in range(len(vars))] for _ in range(len(vars))]
for i, j in zip(*np.tril_indices(len(vars))):
pw = plot_width
ph = plot_width
if j == 0:
pw += plot_width_correction
if i == len(vars) - 1:
ph += plot_height_correction
x = vars[j]
if i != j:
y = vars[i]
x_range, y_range = _data_range(df, x, y)
plots[i][j] = bokeh.plotting.figure(
x_range=x_range, y_range=y_range,
plot_width=pw, plot_height=ph)
if datashade:
_ = datashader.bokeh_ext.InteractiveImage(
plots[i][j], _create_points_image, df=df, x=x, y=y,
cmap=cmap)
else:
plots[i][j].circle(df[x], df[y], size=2,
alpha=alpha, color=cmap)
if show_contours:
xs, ys = _get_contour_lines_from_samples(
df[x].values,
df[y].values,
bins=bins_2d,
smooth=smooth,
levels=levels,
weights=weights,
extend_domain=extend_contour_domain)
plots[i][j].multi_line(xs, ys, line_color=contour_color,
line_width=2)
else:
if plot_ecdf:
x_range, _ = _data_range(df, x, x)
plots[i][i] = bokeh.plotting.figure(
x_range=x_range, y_range=[-0.02, 1.02],
plot_width=pw, plot_height=ph)
if datashade:
x_ecdf, y_ecdf = _ecdf_vals(df[x], formal=True)
df_ecdf = pd.DataFrame(data={x: x_ecdf, 'ECDF': y_ecdf})
_ = datashader.bokeh_ext.InteractiveImage(
plots[i][i], _create_line_image, df=df_ecdf,
x=x, y='ECDF', cmap=hist_color)
else:
plots[i][i] = ecdf(df[x], p=plots[i][i], formal=True,
line_width=2, line_color=hist_color)
else:
x_range, _ = _data_range(df, x, x)
plots[i][i] = bokeh.plotting.figure(
x_range=x_range, plot_width=pw, plot_height=ph)
f, e = np.histogram(df[x], bins=bins, density=True)
e0 = np.empty(2*len(e))
f0 = np.empty(2*len(e))
e0[::2] = e
e0[1::2] = e
f0[0] = 0
f0[-1] = 0
f0[1:-1:2] = f
f0[2:-1:2] = f
plots[i][i].line(e0, f0, line_width=2, color=hist_color)
# Link axis ranges
for i in range(1,len(vars)):
for j in range(i):
plots[i][j].x_range = plots[j][j].x_range
plots[i][j].y_range = plots[i][i].x_range
# Label axes
for i, label in enumerate(labels):
plots[-1][i].xaxis.axis_label = label
for i, label in enumerate(labels[1:]):
plots[i+1][0].yaxis.axis_label = label
if plot_ecdf:
plots[0][0].yaxis.axis_label = 'ECDF'
# Take off tick labels
for i in range(len(vars)-1):
for j in range(i+1):
plots[i][j].xaxis.major_label_text_font_size = '0pt'
if not plot_ecdf:
plots[0][0].yaxis.major_label_text_font_size = '0pt'
for i in range(1, len(vars)):
for j in range(1, i+1):
plots[i][j].yaxis.major_label_text_font_size = '0pt'
grid = bokeh.layouts.gridplot(plots, toolbar_location='left',
toolbar_sticky=False)
return grid
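# Editor's sketch: a minimal corner() call on a synthetic two-parameter sample.
# datashade=False keeps it light-weight for a few hundred points; it relies on
# the module-level ecdf and contour helpers referenced above, and the final
# show() assumes a notebook or browser output backend is configured.
def _example_corner():
    df = pd.DataFrame({'alpha': np.random.normal(size=500),
                       'beta': np.random.normal(size=500)})
    grid = corner(df, vars=['alpha', 'beta'], datashade=False, plot_ecdf=True)
    bokeh.plotting.show(grid)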
def contour(X, Y, Z, levels=None, p=None, overlaid=False, plot_width=350,
plot_height=300, x_axis_label='x', y_axis_label='y', title=None,
line_color=None, line_width=2, color_mapper=None,
overlay_grid=False, fill=False, fill_palette=None,
fill_alpha=0.75, **kwargs):
"""
Make a contour plot, possibly overlaid on an image.
Parameters
----------
X : 2D Numpy array
Array of x-values, as would be produced using np.meshgrid()
Y : 2D Numpy array
Array of y-values, as would be produced using np.meshgrid()
Z : 2D Numpy array
Array of z-values.
levels : array_like
Levels to plot, ranging from 0 to 1. The contour around a given
level contains that fraction of the total probability if the
contour plot is for a 2D probability density function. By
default, the levels are given by the one, two, three, and four
sigma levels corresponding to a marginalized distribution from
a 2D Gaussian distribution.
p : bokeh plotting object, default None
If not None, the contour are added to `p`. This option is not
allowed if `overlaid` is True.
overlaid : bool, default False
If True, `Z` is displayed as an image and the contours are
overlaid.
plot_width : int, default 350
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
TODO:
- Speed tests, need to be certain the looping on all telescopes is not killing
performance
- Introduce new weighting schemes
- Make intersect_lines code more readable
"""
import numpy as np
import itertools
import astropy.units as u
from ctapipe.reco.reco_algorithms import Reconstructor
from ctapipe.io.containers import ReconstructedShowerContainer
from ctapipe.coordinates import NominalFrame, HorizonFrame
from ctapipe.coordinates import TiltedGroundFrame, project_to_ground
from ctapipe.instrument import get_atmosphere_profile_functions
__all__ = [
'HillasIntersection'
]
class HillasIntersection(Reconstructor):
"""
This class is a simple re-implementation of Hillas parameter based event
reconstruction. e.g. https://arxiv.org/abs/astro-ph/0607333
In this case the Hillas parameters are all constructed in the shared
angular (Nominal) system. Direction reconstruction is performed by
extrapolation of the major axes of the Hillas parameters in the nominal
system and the weighted average of the crossing points is taken. Core
reconstruction applies the same procedure in the tilted ground system.
The height of maximum is reconstructed by the projection of the image
centroid onto the shower axis, taking the weighted average of all images.
Uncertainties on the positions are provided by taking the spread of the
crossing points, however this means that no uncertainty can be provided
for multiplicity 2 events.
"""
def __init__(self, atmosphere_profile_name="paranal"):
# We need a conversion function from height above ground to depth of maximum
# To do this we need the conversion table from CORSIKA
_ = get_atmosphere_profile_functions(atmosphere_profile_name)
self.thickness_profile, self.altitude_profile = _
def predict(self, hillas_parameters, tel_x, tel_y, array_direction):
"""
Parameters
----------
hillas_parameters: dict
Dictionary containing Hillas parameters for all telescopes
in reconstruction
tel_x: dict
Dictionary containing telescope position on ground for all
telescopes in reconstruction
tel_y: dict
Dictionary containing telescope position on ground for all
telescopes in reconstruction
array_direction: HorizonFrame
Pointing direction of the array
Returns
-------
ReconstructedShowerContainer:
"""
src_x, src_y, err_x, err_y = self.reconstruct_nominal(hillas_parameters)
core_x, core_y, core_err_x, core_err_y = self.reconstruct_tilted(
hillas_parameters, tel_x, tel_y)
err_x *= u.rad
err_y *= u.rad
nom = NominalFrame(x=src_x * u.rad, y=src_y * u.rad,
array_direction=array_direction)
horiz = nom.transform_to(HorizonFrame())
result = ReconstructedShowerContainer()
result.alt, result.az = horiz.alt, horiz.az
tilt = TiltedGroundFrame(x=core_x * u.m, y=core_y * u.m,
pointing_direction=array_direction)
grd = project_to_ground(tilt)
result.core_x = grd.x
result.core_y = grd.y
x_max = self.reconstruct_xmax(nom.x, nom.y,
tilt.x, tilt.y,
hillas_parameters,
tel_x, tel_y,
90 * u.deg - array_direction.alt)
result.core_uncert = np.sqrt(core_err_x * core_err_x
+ core_err_y * core_err_y) * u.m
result.tel_ids = [h for h in hillas_parameters.keys()]
result.average_size = np.mean([h.intensity for h in hillas_parameters.values()])
result.is_valid = True
src_error = np.sqrt(err_x * err_x + err_y * err_y)
result.alt_uncert = src_error.to(u.deg)
result.az_uncert = src_error.to(u.deg)
result.h_max = x_max
result.h_max_uncert = np.nan
result.goodness_of_fit = np.nan
return result
def reconstruct_nominal(self, hillas_parameters, weighting="Konrad"):
"""
Perform event reconstruction by simple Hillas parameter intersection
in the nominal system
Parameters
----------
hillas_parameters: dict
Hillas parameter objects
weighting: string
Specify image weighting scheme used (HESS or Konrad style)
Returns
-------
Reconstructed event position in the nominal system
"""
if len(hillas_parameters) < 2:
return None # Throw away events with < 2 images
# Find all pairs of Hillas parameters
combos = itertools.combinations(list(hillas_parameters.values()), 2)
hillas_pairs = list(combos)
# Copy parameters we need to a numpy array to speed things up
h1 = list(
map(
lambda h: [h[0].psi.to(u.rad).value,
h[0].x.value,
h[0].y.value,
h[0].intensity], hillas_pairs
)
)
h1 = np.array(h1)
h1 = np.transpose(h1)
h2 = list(
map(lambda h: [h[1].psi.to(u.rad).value,
h[1].x.value,
h[1].y.value,
h[1].intensity], hillas_pairs)
)
h2 = np.array(h2)
h2 = np.transpose(h2)
# Perform intersection
sx, sy = self.intersect_lines(h1[1], h1[2], h1[0],
h2[1], h2[2], h2[0])
if weighting == "Konrad":
weight_fn = self.weight_konrad
elif weighting == "HESS":
weight_fn = self.weight_HESS
# Weight by chosen method
weight = weight_fn(h1[3], h2[3])
# And sin of interception angle
weight *= self.weight_sin(h1[0], h2[0])
# Make weighted average of all possible pairs
x_pos = np.average(sx, weights=weight)
y_pos = np.average(sy, weights=weight)
var_x = np.average((sx - x_pos) ** 2, weights=weight)
var_y = np.average((sy - y_pos) ** 2, weights=weight)
# Copy into nominal coordinate
return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y)
def reconstruct_tilted(self, hillas_parameters, tel_x, tel_y,
weighting="Konrad"):
"""
Core position reconstruction by image axis intersection in the tilted
system
Parameters
----------
hillas_parameters: dict
Hillas parameter objects
tel_x: dict
Telescope X positions, tilted system
tel_y: dict
Telescope Y positions, tilted system
weighting: str
Weighting scheme for averaging of crossing points
Returns
-------
(float, float, float, float):
core position X, core position Y, core uncertainty X,
core uncertainty Y
"""
if len(hillas_parameters) < 2:
return None # Throw away events with < 2 images
h = list()
tx = list()
ty = list()
# Need to loop here as dict is unordered
for tel in hillas_parameters.keys():
h.append(hillas_parameters[tel])
tx.append(tel_x[tel])
ty.append(tel_y[tel])
# Find all pairs of Hillas parameters
hillas_pairs = list(itertools.combinations(h, 2))
tel_x = list(itertools.combinations(tx, 2))
tel_y = list(itertools.combinations(ty, 2))
tx = np.zeros((len(tel_x), 2))
ty = np.zeros((len(tel_y), 2))
for i, _ in enumerate(tel_x):
tx[i][0], tx[i][1] = tel_x[i][0].value, tel_x[i][1].value
ty[i][0], ty[i][1] = tel_y[i][0].value, tel_y[i][1].value
tel_x = np.array(tx)
tel_y = np.array(ty)
# Copy parameters we need to a numpy array to speed things up
h1 = map(lambda h: [h[0].psi.to(u.rad).value, h[0].intensity], hillas_pairs)
h1 = np.array(list(h1))
h1 = np.transpose(h1)
h2 = map(lambda h: [h[1].psi.to(u.rad).value, h[1].intensity], hillas_pairs)
h2 = np.array(list(h2))
h2 = np.transpose(h2)
# Perform intersection
cx, cy = self.intersect_lines(tel_x[:, 0], tel_y[:, 0], h1[0],
tel_x[:, 1], tel_y[:, 1], h2[0])
if weighting == "Konrad":
weight_fn = self.weight_konrad
elif weighting == "HESS":
weight_fn = self.weight_HESS
# Weight by chosen method
weight = weight_fn(h1[1], h2[1])
# And sin of interception angle
weight *= self.weight_sin(h1[0], h2[0])
# Make weighted average of all possible pairs
x_pos = np.average(cx, weights=weight)
y_pos = np.average(cy, weights=weight)
var_x = np.average((cx - x_pos) ** 2, weights=weight)
var_y = np.average((cy - y_pos) ** 2, weights=weight)
return x_pos, y_pos, np.sqrt(var_x), np.sqrt(var_y)
def reconstruct_xmax(self, source_x, source_y, core_x, core_y,
hillas_parameters, tel_x, tel_y, zen):
"""
Geometrical depth of shower maximum reconstruction, assuming the shower
maximum lies at the image centroid
Parameters
----------
source_x: float
Source X position in nominal system
source_y: float
Source Y position in nominal system
core_x: float
Core X position in nominal system
core_y: float
Core Y position in nominal system
hillas_parameters: dict
Dictionary of hillas parameters objects
tel_x: dict
Dictionary of telescope X positions
tel_y: dict
Dictionary of telescope Y positions
zen: float
Zenith angle of shower
Returns
-------
float:
Estimated depth of shower maximum
"""
cog_x = list()
cog_y = list()
amp = list()
tx = list()
ty = list()
# Loops over telescopes in event
for tel in hillas_parameters.keys():
cog_x.append(hillas_parameters[tel].x.to(u.rad).value)
cog_y.append(hillas_parameters[tel].y.to(u.rad).value)
amp.append(hillas_parameters[tel].intensity)
tx.append(tel_x[tel].to(u.m).value)
ty.append(tel_y[tel].to(u.m).value)
height = get_shower_height(source_x.to(u.rad).value,
source_y.to(u.rad).value,
np.array(cog_x),
np.array(cog_y),
core_x.to(u.m).value,
core_y.to(u.m).value,
np.array(tx),
np.array(ty))
weight = np.array(amp)
mean_height = np.sum(height * weight) / np.sum(weight)
# This value is height above telescope in the tilted system,
# we should convert to height above ground
mean_height *= np.cos(zen)
# Add on the height of the detector above sea level
mean_height += 2100 # TODO: replace with instrument info
if mean_height > 100000 or np.isnan(mean_height):
mean_height = 100000
mean_height *= u.m
# Lookup this height in the depth tables, the convert Hmax to Xmax
x_max = self.thickness_profile(mean_height.to(u.km))
# Convert to slant depth
x_max /= np.cos(zen)
return x_max
@staticmethod
def intersect_lines(xp1, yp1, phi1, xp2, yp2, phi2):
"""
Perform intersection of two lines. This code is borrowed from read_hess.
Parameters
----------
xp1: ndarray
X position of first image
yp1: ndarray
Y position of first image
phi1: ndarray
Rotation angle of first image
xp2: ndarray
X position of second image
yp2: ndarray
Y position of second image
phi2: ndarray
Rotation angle of second image
Returns
-------
ndarray of x and y crossing points for all pairs
"""
sin_1 = np.sin(phi1)
cos_1 = np.cos(phi1)
a1 = sin_1
b1 = -1 * cos_1
c1 = yp1 * cos_1 - xp1 * sin_1
sin_2 = np.sin(phi2)
cos_2 = np.cos(phi2)
a2 = sin_2
b2 = -1 * cos_2
c2 = yp2 * cos_2 - xp2 * sin_2
det_ab = (a1 * b2 - a2 * b1)
det_bc = (b1 * c2 - b2 * c1)
det_ca = (c1 * a2 - c2 * a1)
# if math.fabs(det_ab) < 1e-14 : # /* parallel */
        xs = det_bc / det_ab
        ys = det_ca / det_ab
        return xs, ys
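    # Editor's sketch: quick numeric check of the intersection formula above.
    # A line through (0, 0) at 45 deg and a line through (1, 0) at 135 deg
    # should cross at (0.5, 0.5). This helper is illustrative only.
    @staticmethod
    def _example_intersect_lines():
        xs, ys = HillasIntersection.intersect_lines(
            np.array([0.0]), np.array([0.0]), np.array([np.pi / 4]),
            np.array([1.0]), np.array([0.0]), np.array([3 * np.pi / 4]))
        assert np.allclose([xs[0], ys[0]], [0.5, 0.5])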
# Combining files is the slow part, so we want the maximum of
# processes doing that in parallel
log.debug(f'flat_combine: {directory}, nfdicts = {nfdicts}, our_num_processes = {our_num_processes}')
# Number of sub-processes in each process we will spawn
num_subprocesses = int(num_processes / our_num_processes)
# Similarly, the memory fraction for each process we will spawn
subprocess_mem_frac = mem_frac / our_num_processes
log.debug('flat_combine: {} num_processes = {}, mem_frac = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(directory, num_processes, mem_frac, our_num_processes, num_subprocesses, subprocess_mem_frac))
wwk = WorkerWithKwargs(flat_combine_one_fdict,
num_processes=num_subprocesses,
mem_frac=subprocess_mem_frac,
**kwargs)
if nfdicts == 1:
for fdict in fdict_list:
wwk.worker(fdict)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, fdict_list)
######### Calibration object
def dir_has_calibration(directory, glob_include, subdirs=None):
"""Returns True if directory has calibration files matching pattern(s)
in glob_include. Optionally checks subdirs"""
if not os.path.isdir(directory):
# This is the end of our recursive line
return False
if subdirs is None:
subdirs = []
for sd in subdirs:
subdir = os.path.join(directory, sd)
if dir_has_calibration(subdir, glob_include):
return True
# If we made it here, our subdirs had no calibration files or we
# have been called recursively and are in one
for gi in glob_include:
flist = glob.glob(os.path.join(directory, gi))
if len(flist) > 0:
return True
return False
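# Editor's sketch: checking whether a night's raw directory holds any bias
# frames. The path and glob patterns are illustrative, not project defaults.
def _example_dir_has_calibration():
    night = '/data/IoIO/raw/2020-01-01'
    return dir_has_calibration(night, ['Bias*.fit', '*_bias.fit'],
                               subdirs=['Calibration'])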
class Lockfile():
def __init__(self,
fname=None,
check_every=10):
assert fname is not None
self._fname = fname
self.check_every = check_every
@property
def is_set(self):
return os.path.isfile(self._fname)
# --> could add a timeout and a user-specified optional message
def wait(self):
if not self.is_set:
return
while self.is_set:
with open(self._fname, "r") as f:
log.error(f'lockfile {self._fname} detected for {f.read()}')
time.sleep(self.check_every)
log.error(f'(error cleared) lockfile {self._fname} removed')
def create(self):
self.wait()
with open(self._fname, "w") as f:
f.write('PID: ' + str(os.getpid()))
def clear(self):
os.remove(self._fname)
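# Editor's sketch of the Lockfile protocol used by Calibration below: create()
# waits for any existing lock to clear, then records our PID; clear() removes
# the file. The lockfile path here is illustrative.
def _example_lockfile():
    lock = Lockfile('/tmp/calibration_reduce.lock', check_every=1)
    lock.create()
    try:
        pass  # ... run the reduction ...
    finally:
        lock.clear()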
class Calibration():
"""Class for conducting CCD calibrations"""
def __init__(self,
reduce=False,
raw_data_root=RAW_DATA_ROOT,
calibration_root=CALIBRATION_ROOT,
subdirs=CALIBRATION_SUBDIRS,
keep_intermediate=False,
ccdt_tolerance=CCDT_TOLERANCE,
dark_exp_margin=DARK_EXP_MARGIN,
start_date=None,
stop_date=None,
gain_correct=True, # This is gain correcting the bias and dark
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
num_ccdts=NUM_CCDTS,
num_dark_exptimes=NUM_DARK_EXPTIMES,
num_filts=NUM_FILTS,
num_calibration_files=NUM_CALIBRATION_FILES,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=MAX_CCDDATA_BITPIX,
process_expand_factor=COR_PROCESS_EXPAND_FACTOR,
griddata_expand_factor=GRIDDATA_EXPAND_FACTOR,
bias_glob=BIAS_GLOB,
dark_glob=DARK_GLOB,
flat_glob=FLAT_GLOB,
flat_cut=FLAT_CUT,
nd_edge_expand=ND_EDGE_EXPAND,
lockfile=LOCKFILE):
self._raw_data_root = raw_data_root
self._calibration_root = calibration_root
self._subdirs = subdirs
self.keep_intermediate = keep_intermediate
self._ccdt_tolerance = ccdt_tolerance
        self._dark_exp_margin = dark_exp_margin
self._bias_table = None
self._dark_table = None
self._flat_table = None
# gain_correct is set only in the biases and propagated
# through the rest of the pipeline in cor_process
self._gain_correct = gain_correct
self._bias_glob = assure_list(bias_glob)
self._dark_glob = assure_list(dark_glob)
self._flat_glob = assure_list(flat_glob)
self._lockfile = lockfile
self.flat_cut = flat_cut
self.nd_edge_expand = nd_edge_expand
self.num_processes = num_processes
self.mem_frac = mem_frac
self.num_ccdts = num_ccdts
self.num_dark_exptimes = num_dark_exptimes
self.num_filts = num_filts
self.num_calibration_files = num_calibration_files
self.naxis1 = naxis1
self.naxis2 = naxis2
self.bitpix = bitpix
self.process_expand_factor = process_expand_factor
self.griddata_expand_factor = griddata_expand_factor
if start_date is None:
self._start_date = datetime.datetime(1,1,1)
else:
self._start_date = datetime.datetime.strptime(start_date,
"%Y-%m-%d")
if stop_date is None:
# Make stop time tomorrow in case we are analyzing on the
# UT boundary
self._stop_date = datetime.datetime.today() + datetime.timedelta(days=1)
else:
self._stop_date = datetime.datetime.strptime(stop_date, "%Y-%m-%d")
assert self._start_date <= self._stop_date
# These need to be on a per-instantiation basis, since they
# depend on our particular start-stop range. These are also
# important, since we don't take calibrations every night. The
# cost of checking for new reductions is relatively low, since
# it is mostly a directory listing exercise
self._bias_dirs_dates_checked = None
self._dark_dirs_dates_checked = None
self._flat_dirs_dates_checked = None
if reduce:
self.reduce()
@property
def gain_correct(self):
return self._gain_correct
def dirs_dates_to_reduce(self, table_creator,
glob_include,
dirs_dates_checked=None,
subdirs=None):
to_check = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
# See if we have reduced/checked any/everything in this
# instantiation. This is not as efficient as it could be
# since we have sorted lists, but we don't have many elements,
# so there is not much point in getting fancier
if dirs_dates_checked is not None:
to_check = [dt for dt in to_check
                        if dt not in dirs_dates_checked]
if len(to_check) == 0:
return []
# Take any reductions on disk out of the list. Note, we check
# for date only, since we have lost the original directory
# information once reduced
tbl = table_creator(autoreduce=False, rescan=True)
if tbl is not None:
reduced_ts = [tm.to_datetime() for tm in tbl['dates']]
# Remove duplicates
reduced_ts = list(set(reduced_ts))
to_check = [dt for dt in to_check
                        if dt[1] not in reduced_ts]
if len(to_check) == 0:
return []
to_reduce = [dt for dt in to_check
if dir_has_calibration(dt[0],
glob_include,
subdirs=subdirs)]
# Remove duplicates
return sorted(list(set(to_reduce)))
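    # The filtering above amounts to three successive set differences
    # (sketch using the same names as the code): drop directories already
    # checked in this instantiation, drop dates already reduced on disk,
    # then keep only directories that actually contain calibration files:
    #
    #     to_check = set(to_check) - set(dirs_dates_checked or [])
    #     to_check = {dt for dt in to_check if dt[1] not in reduced_ts}
    #     to_reduce = {dt for dt in to_check
    #                  if dir_has_calibration(dt[0], glob_include, subdirs)}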
def reduce_bias(self):
dirs_dates = \
self.dirs_dates_to_reduce(self.bias_table_create,
self._bias_glob,
self._bias_dirs_dates_checked,
self._subdirs)
ndirs_dates = len(dirs_dates)
if ndirs_dates == 0:
return
# If we made it here, we have some real work to do
# Set a simple lockfile so we don't have multiple processes reducing
lock = Lockfile(self._lockfile)
lock.create()
one_fdict_size = (self.num_calibration_files
* self.naxis1 * self.naxis2
* self.bitpix/8
* self.process_expand_factor)
ncp = num_can_process(self.num_ccdts,
num_processes=self.num_processes,
mem_frac=self.mem_frac,
process_size=self.num_ccdts * one_fdict_size,
error_if_zero=False)
our_num_processes = max(1, ncp)
num_subprocesses = int(self.num_processes / our_num_processes)
subprocess_mem_frac = self.mem_frac / our_num_processes
log.debug(f'Calibration.reduce_bias: ndirs_dates = {ndirs_dates}')
log.debug('Calibration.reduce_bias: self.num_processes = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(self.num_processes, our_num_processes, num_subprocesses, subprocess_mem_frac))
#return
wwk = WorkerWithKwargs(bias_combine,
subdirs=self._subdirs,
glob_include=self._bias_glob,
outdir=self._calibration_root,
auto=True, # A little dangerous, but just one place for changes
gain_correct=self._gain_correct,
num_processes=self.num_processes,
naxis1=self.naxis1,
naxis2=self.naxis2,
process_expand_factor=self.process_expand_factor,
num_calibration_files=self.num_calibration_files,
mem_frac=self.mem_frac,
keep_intermediate=self.keep_intermediate)
dirs = [dt[0] for dt in dirs_dates]
if our_num_processes == 1:
for d in dirs:
wwk.worker(d)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, dirs)
self.bias_table_create(rescan=True, autoreduce=False)
# This could potentially get set in dirs_dates_to_reduce, but
# it seems better to set it after we have actually done the work
all_dirs_dates = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
self._bias_dirs_dates_checked = all_dirs_dates
lock.clear()
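    # Rough sense of the memory budget above (hypothetical numbers): with
    # num_calibration_files = 10 frames of 2750 x 2200 pixels promoted to
    # 64-bit floats and process_expand_factor ~ 3, one_fdict_size is about
    # 10 * 2750 * 2200 * 8 * 3 ~= 1.5 GB per CCD-temperature group, and
    # num_can_process caps our_num_processes so that num_ccdts groups of
    # that size fit within mem_frac of available memory.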
def reduce_dark(self):
dirs_dates = \
self.dirs_dates_to_reduce(self.dark_table_create,
self._dark_glob,
self._dark_dirs_dates_checked,
self._subdirs)
ndirs_dates = len(dirs_dates)
if ndirs_dates == 0:
return
# If we made it here, we have some real work to do
# Set a simple lockfile so we don't have multiple processes reducing
lock = Lockfile(self._lockfile)
lock.create()
one_fdict_size = (self.num_calibration_files
* self.naxis1 * self.naxis2
* self.bitpix/8
* self.process_expand_factor)
ncp = num_can_process(self.num_ccdts,
num_processes=self.num_processes,
mem_frac=self.mem_frac,
process_size=self.num_ccdts * one_fdict_size,
error_if_zero=False)
our_num_processes = max(1, ncp)
num_subprocesses = int(self.num_processes / our_num_processes)
subprocess_mem_frac = self.mem_frac / our_num_processes
log.debug(f'Calibration.reduce_dark: ndirs_dates = {ndirs_dates}')
log.debug('Calibration.reduce_dark: self.num_processes = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(self.num_processes, our_num_processes, num_subprocesses, subprocess_mem_frac))
#return
wwk = WorkerWithKwargs(dark_combine,
subdirs=self._subdirs,
glob_include=self._dark_glob,
outdir=self._calibration_root,
calibration=self,
auto=True, # A little dangerous, but just one place for changes
num_processes=self.num_processes,
naxis1=self.naxis1,
naxis2=self.naxis2,
process_expand_factor=self.process_expand_factor,
num_calibration_files=self.num_calibration_files,
mem_frac=self.mem_frac,
keep_intermediate=self.keep_intermediate)
dirs = [dt[0] for dt in dirs_dates]
if our_num_processes == 1:
for d in dirs:
wwk.worker(d)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, dirs)
self.dark_table_create(rescan=True, autoreduce=False)
# This could potentially get set in dirs_dates_to_reduce, but
# it seems better to set it after we have actually done the work
all_dirs_dates = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
self._dark_dirs_dates_checked = all_dirs_dates
lock.clear()
def reduce_flat(self):
dirs_dates = \
self.dirs_dates_to_reduce(self.flat_table_create,
self._flat_glob,
self._flat_dirs_dates_checked,
self._subdirs)
ndirs_dates = len(dirs_dates)
if ndirs_dates == 0:
return
# If we made it here, we have some real work to do
# Set a simple lockfile so we don't have multiple processes reducing
lock = Lockfile(self._lockfile)
lock.create()
one_filt_size = (self.num_calibration_files
* self.naxis1 * self.naxis2
* self.bitpix/8
* self.griddata_expand_factor)
# Our sub-process can divide and conquer if necessary
ncp = num_can_process(self.num_filts,
num_processes=self.num_processes,
mem_frac=self.mem_frac,
process_size=self.num_filts * one_filt_size,
error_if_zero=False)
our_num_processes = max(1, ncp)
num_subprocesses = int(self.num_processes / our_num_processes)
subprocess_mem_frac = self.mem_frac / our_num_processes
log.debug(f'Calibration.reduce_flat: ndirs_dates = {ndirs_dates}')
log.debug('Calibration.reduce_flat: self.num_processes = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(self.num_processes, our_num_processes, num_subprocesses, subprocess_mem_frac))
wwk = WorkerWithKwargs(flat_combine,
subdirs=self._subdirs,
glob_include=self._flat_glob,
outdir=self._calibration_root,
calibration=self,
auto=True, # A little dangerous, but just one place for changes
num_processes=self.num_processes,
mem_frac=self.mem_frac,
num_calibration_files=self.num_calibration_files,
naxis1=self.naxis1,
naxis2=self.naxis2,
griddata_expand_factor=self.griddata_expand_factor,
keep_intermediate=self.keep_intermediate,
flat_cut=self.flat_cut,
nd_edge_expand=self.nd_edge_expand)
dirs = [dt[0] for dt in dirs_dates]
if our_num_processes == 1:
for d in dirs:
wwk.worker(d)
else:
with NestablePool(processes=our_num_processes) as p:
p.map(wwk.worker, dirs)
self.flat_table_create(rescan=True, autoreduce=False)
# This could potentially get set in dirs_dates_to_reduce, but
# it seems better to set it after we have actually done the work
all_dirs_dates = get_dirs_dates(self._raw_data_root,
start=self._start_date,
stop=self._stop_date)
| |
<filename>venv/lib/python3.6/site-packages/ansible_collections/cloudscale_ch/cloud/plugins/modules/server.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, <NAME> <<EMAIL>>
# Copyright: (c) 2019, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: server
short_description: Manages servers on the cloudscale.ch IaaS service
description:
- Create, update, start, stop and delete servers on the cloudscale.ch IaaS service.
notes:
  - If the I(uuid) option is provided, it takes precedence over I(name) for server selection. This allows the server's name to be updated.
  - If no I(uuid) option is provided, I(name) is used for server selection. If more than one server with this name exists, execution is aborted.
  - Only I(name) and I(flavor) are evaluated for the update.
  - The option I(force=true) must be given to allow rebooting existing running servers in order to apply the changes.
author:
- <NAME> (@gaudenz)
- <NAME> (@resmo)
- <NAME> (@href)
version_added: "1.0.0"
options:
state:
description:
- State of the server.
choices: [ running, stopped, absent ]
default: running
type: str
name:
description:
- Name of the Server.
- Either I(name) or I(uuid) are required.
type: str
uuid:
description:
- UUID of the server.
- Either I(name) or I(uuid) are required.
type: str
flavor:
description:
- Flavor of the server.
type: str
image:
description:
- Image used to create the server.
type: str
zone:
description:
      - Zone in which the server resides (e.g. C(lpg1) or C(rma1)).
type: str
volume_size_gb:
description:
- Size of the root volume in GB.
default: 10
type: int
bulk_volume_size_gb:
description:
- Size of the bulk storage volume in GB.
- No bulk storage volume if not set.
type: int
ssh_keys:
description:
- List of SSH public keys.
- Use the full content of your .pub file here.
type: list
elements: str
password:
description:
- Password for the server.
type: str
use_public_network:
description:
- Attach a public network interface to the server.
type: bool
use_private_network:
description:
- Attach a private network interface to the server.
type: bool
use_ipv6:
description:
- Enable IPv6 on the public network interface.
default: yes
type: bool
interfaces:
description:
- List of network interface objects specifying the interfaces to be attached to the server.
See U(https://www.cloudscale.ch/en/api/v1/#interfaces-attribute-specification) for more details.
type: list
elements: dict
version_added: 1.4.0
suboptions:
network:
description:
- Create a network interface on the network identified by UUID.
            Use 'public' instead of a UUID to attach a public network interface.
Can be omitted if a subnet is provided under addresses.
type: str
addresses:
description:
- Attach a private network interface and configure a subnet and/or an IP address.
type: list
elements: dict
suboptions:
subnet:
description:
- UUID of the subnet from which an address will be assigned.
type: str
address:
description:
- The static IP address of the interface. Use '[]' to avoid assigning an IP address via DHCP.
type: str
server_groups:
description:
- List of UUID or names of server groups.
type: list
elements: str
user_data:
description:
- Cloud-init configuration (cloud-config) data to use for the server.
type: str
force:
description:
      - Allow stopping the running server for updates if necessary.
default: no
type: bool
tags:
description:
      - Tags associated with the server. Set this to C({}) to clear any tags.
type: dict
extends_documentation_fragment: cloudscale_ch.cloud.api_parameters
'''
EXAMPLES = '''
# Create and start a server with an existing server group (shiny-group)
- name: Start cloudscale.ch server
cloudscale_ch.cloud.server:
name: my-shiny-cloudscale-server
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
server_groups: shiny-group
zone: lpg1
use_private_network: True
bulk_volume_size_gb: 100
api_token: xxxxxx
# Start another server in anti-affinity (server group shiny-group)
- name: Start second cloudscale.ch server
cloudscale_ch.cloud.server:
name: my-other-shiny-server
image: ubuntu-16.04
flavor: flex-8
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
server_groups: shiny-group
zone: lpg1
api_token: xxxxxx
# Force to update the flavor of a running server
- name: Start cloudscale.ch server
cloudscale_ch.cloud.server:
name: my-shiny-cloudscale-server
image: debian-10
flavor: flex-8
force: yes
ssh_keys: ssh-rsa XXXXXXXXXX...XXXX ansible@cloudscale
use_private_network: True
bulk_volume_size_gb: 100
api_token: xxxxxx
register: server1
# Stop the first server
- name: Stop my first server
cloudscale_ch.cloud.server:
uuid: '{{ server1.uuid }}'
state: stopped
api_token: xxxxxx
# Delete my second server
- name: Delete my second server
cloudscale_ch.cloud.server:
name: my-other-shiny-server
state: absent
api_token: xxxxxx
# Start a server and wait for the SSH host keys to be generated
- name: Start server and wait for SSH host keys
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-ssh-key
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: <PASSWORD>
register: server
until: server is not failed
retries: 5
delay: 2
# Start a server with two network interfaces:
#
# A public interface with IPv4/IPv6
# A private interface on a specific private network with an IPv4 address
- name: Start a server with a public and private network interface
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-two-network-interfaces
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
interfaces:
- network: 'public'
- addresses:
- subnet: UUID_of_private_subnet
# Start a server with a specific IPv4 address from subnet range
- name: Start a server with a specific IPv4 address from subnet range
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-specific-address
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
interfaces:
- addresses:
- subnet: UUID_of_private_subnet
address: 'A.B.C.D'
# Start a server with two network interfaces:
#
# A public interface with IPv4/IPv6
# A private interface on a specific private network with no IPv4 address
- name: Start a server with a private network interface and no IP address
cloudscale_ch.cloud.server:
name: my-cloudscale-server-with-specific-address
image: debian-10
flavor: flex-4
ssh_keys: ssh-rsa XXXXXXXXXXX ansible@cloudscale
api_token: xxxxxx
interfaces:
- network: 'public'
- network: UUID_of_private_network
addresses: []
'''
RETURN = '''
href:
description: API URL to get details about this server
returned: success when not state == absent
type: str
sample: https://api.cloudscale.ch/v1/servers/cfde831a-4e87-4a75-960f-89b0148aa2cc
uuid:
description: The unique identifier for this server
returned: success
type: str
sample: cfde831a-4e87-4a75-960f-89b0148aa2cc
name:
description: The display name of the server
returned: success
type: str
sample: its-a-me-mario.cloudscale.ch
state:
description: The current status of the server
returned: success
type: str
sample: running
flavor:
description: The flavor that has been used for this server
returned: success when not state == absent
type: dict
sample: { "slug": "flex-4", "name": "Flex-4", "vcpu_count": 2, "memory_gb": 4 }
image:
description: The image used for booting this server
returned: success when not state == absent
type: dict
sample: { "default_username": "ubuntu", "name": "Ubuntu 18.04 LTS", "operating_system": "Ubuntu", "slug": "ubuntu-18.04" }
zone:
description: The zone used for booting this server
returned: success when not state == absent
type: dict
sample: { 'slug': 'lpg1' }
volumes:
description: List of volumes attached to the server
returned: success when not state == absent
type: list
sample: [ {"type": "ssd", "device": "/dev/vda", "size_gb": "50"} ]
interfaces:
description: List of network ports attached to the server
returned: success when not state == absent
type: list
sample: [ { "type": "public", "addresses": [ ... ] } ]
ssh_fingerprints:
  description: A list of SSH host key fingerprints. Will be null until the host keys can be retrieved from the server.
returned: success when not state == absent
type: list
sample: ["ecdsa-sha2-nistp256 SHA256:XXXX", ... ]
ssh_host_keys:
  description: A list of SSH host keys. Will be null until the host keys can be retrieved from the server.
returned: success when not state == absent
type: list
sample: ["ecdsa-sha2-nistp256 XXXXX", ... ]
server_groups:
description: List of server groups
returned: success when not state == absent
type: list
sample: [ {"href": "https://api.cloudscale.ch/v1/server-groups/...", "uuid": "...", "name": "db-group"} ]
tags:
  description: Tags associated with the server.
returned: success
type: dict
sample: { 'project': 'my project' }
'''
from datetime import datetime, timedelta
from time import sleep
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.api import (
AnsibleCloudscaleBase,
cloudscale_argument_spec,
)
ALLOWED_STATES = ('running',
'stopped',
'absent',
)
class AnsibleCloudscaleServer(AnsibleCloudscaleBase):
def __init__(self, module):
super(AnsibleCloudscaleServer, self).__init__(module)
# Initialize server dictionary
self._info = {}
def _init_server_container(self):
return {
'uuid': self._module.params.get('uuid') or self._info.get('uuid'),
'name': self._module.params.get('name') or self._info.get('name'),
'state': 'absent',
}
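    # Note: the container above defaults to state 'absent'; _get_server_info
    # below only replaces it with live API data when a matching server is
    # found, so an unknown uuid or name simply reports an absent server.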
def _get_server_info(self, refresh=False):
if self._info and not refresh:
return self._info
self._info = self._init_server_container()
uuid = self._info.get('uuid')
if uuid is not None:
server_info = self._get('servers/%s' % uuid)
if server_info:
self._info = self._transform_state(server_info)
else:
name = self._info.get('name')
if name is not None:
servers = self._get('servers') or []
matching_server = []
for server in servers:
if server['name'] == name:
matching_server.append(server)
if len(matching_server) == 1:
| |
from numpy import array
def scigrid_2011_01_06_00():
ppc = {"version": '2'}
ppc["baseMVA"] = 100.0
ppc["bus"] = array([
[586, 3, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[589, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[590, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[593, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[595, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[598, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[599, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[602, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[603, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[607, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[608, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[609, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[612, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[614, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[616, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[617, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[618, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[619, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[624, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[629, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[632, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[637, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[638, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[640, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[641, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[642, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[643, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[647, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[652, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[655, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[663, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[666, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[672, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[676, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[681, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[683, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[687, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[696, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[697, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[698, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[702, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[705, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[707, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[713, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[714, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[716, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[717, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[719, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[722, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[724, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[727, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[728, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[730, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[732, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[735, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[738, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[741, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[742, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[747, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[748, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[749, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[750, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[758, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[761, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[762, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[765, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[767, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[774, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[777, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[778, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[781, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[784, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[785, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[787, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[788, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[789, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[791, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[792, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[795, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[800, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[801, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[802, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[805, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[806, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[808, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[809, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[811, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[814, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[816, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[817, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[821, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[822, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[826, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[834, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[835, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[836, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[837, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[839, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[841, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[843, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[844, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[850, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[851, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[853, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[856, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[857, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[858, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[860, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[865, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[867, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[870, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[872, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[873, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[874, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[875, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[877, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[882, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[883, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[885, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[886, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[889, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[890, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[895, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[896, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[898, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[900, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[902, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[903, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[905, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[906, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[907, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[909, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[917, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[918, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[920, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[921, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[923, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[925, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[931, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[936, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[937, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[939, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[940, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[944, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[950, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[952, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[958, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[959, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[960, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[963, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[968, 2, 0, 0, 0, 0, 0, 0.999618, 0, 220.0, 0, 1.1, 0.9 ],
[969, 2, 0, 0, 0, 0, 0, 0.999618, 0, 220.0, 0, 1.1, 0.9 ],
[971, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[973, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[976, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[978, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[982, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[984, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[985, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[986, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[987, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[988, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[993, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[994, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[995, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[997, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[999, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1000, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1002, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1003, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1007, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1010, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1011, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1012, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1027, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1028, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1029, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1030, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1031, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1032, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1034, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1035, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1036, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1037, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1038, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1039, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1040, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1041, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1042, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1043, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1044, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1045, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1046, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1047, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1048, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1049, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1050, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1051, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1052, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1053, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1054, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1055, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1056, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1057, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1058, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1059, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1060, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1061, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1062, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1063, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1064, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1065, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1066, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1067, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1068, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1069, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1070, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1071, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1072, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1073, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1074, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1075, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1076, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1077, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1078, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1079, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1080, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1081, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1082, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1083, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1084, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1085, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1086, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1087, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1088, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1089, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1090, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1091, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1092, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1093, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1094, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1095, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1096, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1097, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1098, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1099, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1100, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1101, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1102, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1103, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1104, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1105, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1106, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1107, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1108, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1109, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1110, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1111, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1112, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1113, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1114, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1115, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1116, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1117, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1118, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1119, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1120, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1121, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1122, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1123, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1124, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1125, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1126, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1127, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1128, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1129, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1130, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1131, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1132, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1133, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1134, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1135, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1136, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1137, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1138, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1139, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1140, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1141, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1142, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1143, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1144, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1146, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1147, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1148, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1149, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1150, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1151, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1152, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1153, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1154, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1155, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1156, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1157, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1158, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1159, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1160, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1161, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1162, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1163, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1164, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1165, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1166, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1167, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1168, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1169, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1170, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1171, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1172, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1173, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1174, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1175, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1176, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1177, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1178, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1179, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1180, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1181, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1182, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1183, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1184, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1185, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1186, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1187, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1188, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1190, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1191, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1192, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1193, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1194, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1195, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1196, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1197, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1198, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1199, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1200, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1201, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1202, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1203, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1204, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1205, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1206, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1207, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1208, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1209, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1210, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1211, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1212, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1213, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1214, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1215, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1216, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1217, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1218, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1219, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1220, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1221, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1222, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1223, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1224, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1225, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1226, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1227, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1228, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1229, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1230, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1231, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1232, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1233, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1235, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1236, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1237, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1238, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1239, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1240, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1241, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1242, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1243, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1244, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1245, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1246, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1247, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1248, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1249, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1250, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1251, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1252, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1253, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1254, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1255, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1256, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1257, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1258, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1259, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1260, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1261, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1262, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1263, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1264, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1265, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1266, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1267, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1268, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1269, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1270, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1271, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1272, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1273, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1274, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1275, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1276, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1277, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1278, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1279, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1280, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1281, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1282, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1283, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1284, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1285, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1286, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1287, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1288, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1289, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1290, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1291, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1292, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1293, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1294, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1295, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1296, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1297, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1298, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1299, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1300, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1301, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1302, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1303, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1304, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1305, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1306, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1307, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1308, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1309, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1310, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1311, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1312, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1313, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1314, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1315, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1316, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1317, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1318, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1319, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1320, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1321, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1322, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1323, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1324, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1325, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1326, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1327, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1328, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1329, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1330, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1331, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1332, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1333, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1334, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1335, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1336, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1337, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1338, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1339, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1340, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1341, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1342, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1343, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1344, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1345, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1346, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1347, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1348, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1349, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1350, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1351, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1352, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1354, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1355, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1356, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1357, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1358, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1359, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1360, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1361, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1362, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1363, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1364, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1365, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1366, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1367, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1368, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1369, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1370, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1371, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1372, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1373, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1374, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1375, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1376, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1377, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1378, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1379, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1380, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1381, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1382, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1383, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1384, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1385, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1386, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1387, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1388, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1389, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1390, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1391, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1392, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1393, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1394, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1395, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1396, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1397, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1398, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1399, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1400, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1401, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1402, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1403, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1404, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1405, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1406, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1407, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1408, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1409, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1411, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1412, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1413, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1414, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1415, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1416, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1417, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1418, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1419, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1420, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1421, 2, 0, 0, 0, 0, 0, 0.999618, 0, 220.0, 0, 1.1, 0.9 ],
[1422, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1423, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1424, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1425, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1426, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1427, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1428, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1429, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1430, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1431, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1432, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1433, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1434, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1435, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1436, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1437, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1438, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1439, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1440, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1441, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1442, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1443, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1444, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1445, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1446, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1447, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1448, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1449, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1450, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1451, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1452, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1453, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1454, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1455, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1456, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1457, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1458, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1459, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1460, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1461, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1462, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1463, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1464, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1465, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1466, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1467, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1468, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1469, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1470, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1471, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1472, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1473, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1474, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1475, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1476, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1477, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1478, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1479, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1480, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1481, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1482, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1483, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1484, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1485, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1486, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1487, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1488, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1489, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1490, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1491, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1492, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1493, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1494, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1495, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1496, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1497, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1498, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1499, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1500, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1501, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1502, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1503, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1504, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1505, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1506, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1507, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1508, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1510, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1511, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1512, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1513, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1514, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1516, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1517, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1518, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1519, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1, 1, 259.857723, 51.971545, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[2, 1, 0, 0, 0, 0, 0, 1.000014, 0, 380.0, 0, 1.1, 0.9 ],
[3, 1, 45.546069, 9.109214, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[4, 1, 74.902021, 14.980404, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[5, 1, 0, 0, 0, 0, 0, 0.999026, 0, 380.0, 0, 1.1, 0.9 ],
[6, 1, 219.943383, 43.988677, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[7, 1, 165.75469, 33.150938, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[8, 1, 138.691682, 27.738336, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[9, 1, 93.795017, 18.759003, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[10, 1, 0, 0, 0, 0, 0, 1.000954, 0, 380.0, 0, 1.1, 0.9 ],
[11, 1, 82.180424, 16.436085, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[12, 1, 0, 0, 0, 0, 0, 1.000759, 0, 380.0, 0, 1.1, 0.9 ],
[13, 1, 0, 0, 0, 0, 0, 1.000371, 0, 380.0, 0, 1.1, 0.9 ],
[14, 1, 196.545426, 39.309085, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[15, 1, 0, 0, 0, 0, 0, 1.000504, 0, 380.0, 0, 1.1, 0.9 ],
[16, 1, 335.201053, 67.040211, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[17, 1, 78.948654, 15.789731, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[18, 1, 0, 0, 0, 0, 0, 1.002044, 0, 380.0, 0, 1.1, 0.9 ],
[19, 1, 195.052362, 39.010472, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[20, 1, 0, 0, 0, 0, 0, 0.997098, 0, 380.0, 0, 1.1, 0.9 ],
[21, 1, 838.755076, 167.751015, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[22, 1, 0, 0, 0, 0, 0, 0.999703, 0, 380.0, 0, 1.1, 0.9 ],
[23, 1, 109.821478, 21.964296, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[24, 1, 0, 0, 0, 0, 0, 0.999976, 0, 380.0, 0, 1.1, 0.9 ],
[25, 1, 52.528378, 10.505676, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[26, 1, 0, 0, 0, 0, 0, 1.000149, 0, 380.0, 0, 1.1, 0.9 ],
[27, 1, 64.480039, 12.896008, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[28, 1, 190.519197, 38.103839, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[29, 1, 69.981668, 13.996334, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[30, 1, 0, 0, 0, 0, 0, 0.99858, 0, 380.0, 0, 1.1, 0.9 ],
[31, 1, 137.722115, 27.544423, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[32, 1, 0, 0, 0, 0, 0, 0.99603, 0, 380.0, 0, 1.1, 0.9 ],
[33, 1, 172.677651, 34.53553, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[34, 1, 34.258436, 6.851687, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[35, 1, 2.26809, 0.453618, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[36, 1, 7.509318, 1.501864, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[37, 1, 0, 0, 0, 0, 0, 1.002564, 0, 380.0, 0, 1.1, 0.9 ],
[38, 1, 180.916243, 36.183249, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[39, 1, 59.240749, 11.84815, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[40, 1, 61.878815, 12.375763, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[41, 1, 66.505702, 13.30114, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[42, 1, 0, 0, 0, 0, 0, 1.00183, 0, 380.0, 0, 1.1, 0.9 ],
[43, 1, 101.98949, 20.397898, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[44, 1, 130.480431, 26.096086, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[45, 1, 69.261931, 13.852386, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[46, 1, 0, 0, 0, 0, 0, 1.000079, 0, 380.0, 0, 1.1, 0.9 ],
[47, 1, 301.156435, 60.231287, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[48, 1, 207.004945, 41.400989, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[49, 1, 52.361807, 10.472361, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[50, 1, 76.245859, 15.249172, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[51, 1, 98.809657, 19.761931, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[52, 1, 0, 0, 0, 0, 0, 1.000228, 0, 380.0, 0, 1.1, 0.9 ],
[53, 1, 149.927828, 29.985566, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[54, 1, 76.172066, 15.234413, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[55, 1, 74.702537, 14.940507, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[56, 1, 0, 0, 0, 0, 0, 0.999807, 0, 380.0, 0, 1.1, 0.9 ],
[57, 1, 89.171493, 17.834299, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[58, 1, 204.260867, 40.852173, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[59, 1, 58.338152, 11.66763, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[60, 1, 30.757492, 6.151498, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[61, 1, 0, 0, 0, 0, 0, 1.000301, 0, 380.0, 0, 1.1, 0.9 ],
[62, 1, 234.488335, 46.897667, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[63, 1, 138.416456, 27.683291, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[64, 1, 1468.879109, 293.775822, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[65, 1, 4.89433, 0.978866, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[66, 1, 155.291504, 31.058301, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[67, 1, 333.126436, 66.625287, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[68, 1, 0, 0, 0, 0, 0, 0.998579, 0, 380.0, 0, 1.1, 0.9 ],
[69, 1, 0, 0, 0, 0, 0, 1.001236, 0, 380.0, 0, 1.1, 0.9 ],
[70, 1, 630.199236, 126.039847, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[71, 1, 146.450185, 29.290037, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[72, 1, 239.865307, 47.973061, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[73, 1, 76.789923, 15.357985, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[74, 1, 0, 0, 0, 0, 0, 1.002256, 0, 380.0, 0, 1.1, 0.9 ],
[75, 1, 95.707271, 19.141454, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[76, 1, 92.378515, 18.475703, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[77, 1, 89.474905, 17.894981, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[78, 1, 0, 0, 0, 0, 0, 0.995115, 0, 380.0, 0, 1.1, 0.9 ],
[79, 1, 92.389736, 18.477947, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[80, 1, 98.132155, 19.626431, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[81, 1, 110.777838, 22.155568, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[82, 1, 3.686752, 0.73735, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[83, 1, 246.670862, 49.334172, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[84, 1, 24.283224, 4.856645, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[85, 1, 84.209508, 16.841902, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[86, 1, 0, 0, 0, 0, 0, 0.999888, 0, 380.0, 0, 1.1, 0.9 ],
[87, 1, 0, 0, 0, 0, 0, 0.998257, 0, 380.0, 0, 1.1, 0.9 ],
[88, 1, 67.968233, 13.593647, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[89, 1, 84.324997, 16.864999, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[90, 1, 97.391648, 19.47833, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[91, 1, 33.829009, 6.765802, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[92, 1, 36.919317, 7.383863, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[93, 1, 36.210454, 7.242091, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[94, 1, 0, 0, 0, 0, 0, 1.000491, 0, 380.0, 0, 1.1, 0.9 ],
[95, 1, 0, 0, 0, 0, 0, 1.000641, 0, 380.0, 0, 1.1, 0.9 ],
[96, 1, 0, 0, 0, 0, 0, 0.999997, 0, 380.0, 0, 1.1, 0.9 ],
[97, 1, 5.09273, 1.018546, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[98, 1, 93.634818, 18.726964, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[99, 1, 0, 0, 0, 0, 0, 1.001075, 0, 380.0, 0, 1.1, 0.9 ],
[100, 1, 0, 0, 0, 0, 0, 1.001052, 0, 380.0, 0, 1.1, 0.9 ],
[101, 1, 66.303, 13.2606, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[102, 1, 128.332546, 25.666509, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[103, 1, 150.045579, 30.009116, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[104, 1, 0, 0, 0, 0, 0, 0.999879, 0, 380.0, 0, 1.1, 0.9 ],
[105, 1, 0, 0, 0, 0, 0, 0.999743, 0, 380.0, 0, 1.1, 0.9 ],
[106, 1, 0, 0, 0, 0, 0, 0.99981, 0, 380.0, 0, 1.1, 0.9 ],
[107, 1, 0, 0, 0, 0, 0, 0.999995, 0, 380.0, 0, 1.1, 0.9 ],
[108, 1, 105.838863, 21.167773, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[109, 1, 42.85235, 8.57047, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[110, 1, 55.624068, 11.124814, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[111, 1, 98.024637, 19.604927, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[112, 1, 49.612823, 9.922565, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[113, 1, 78.207781, 15.641556, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[114, 1, 115.180938, 23.036188, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[115, 1, 74.250379, 14.850076, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[116, 1, 124.247798, 24.84956, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[117, 1, 0, 0, 0, 0, 0, 1.000743, 0, 380.0, 0, 1.1, 0.9 ],
[118, 1, 192.379937, 38.475987, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[119, 1, 37.291132, 7.458226, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[120, 1, 0, 0, 0, 0, 0, 1.001241, 0, 380.0, 0, 1.1, 0.9 ],
[121, 1, 50.641374, 10.128275, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[122, 1, 44.336008, 8.867202, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[123, 1, 0, 0, 0, 0, 0, 1.000222, 0, 380.0, 0, 1.1, 0.9 ],
[124, 1, 0, 0, 0, 0, 0, 1.000006, 0, 380.0, 0, 1.1, 0.9 ],
[125, 1, 0, 0, 0, 0, 0, 0.999784, 0, 380.0, 0, 1.1, 0.9 ],
[126, 1, 232.454793, 46.490959, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[127, 1, 179.71201, 35.942402, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[128, 1, 0, 0, 0, 0, 0, 1.001303, 0, 380.0, 0, 1.1, 0.9 ],
[129, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[130, 1, 247.79017, 49.558034, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[131, 1, 54.711855, 10.942371, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[132, 1, 142.461399, 28.49228, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[133, 1, 47.718988, 9.543798, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[134, 1, 47.523579, 9.504716, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[135, 1, 47.586587, 9.517317, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[136, 1, 46.098531, 9.219706, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[137, 1, 36.874582, 7.374916, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[138, 1, 0, 0, 0, 0, 0, 0.998397, 0, 380.0, 0, 1.1, 0.9 ],
[139, 1, 72.233569, 14.446714, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[140, 1, 49.952606, 9.990521, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[141, 1, 59.185021, 11.837004, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[142, 1, 65.124651, 13.02493, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[143, 1, 0, 0, 0, 0, 0, 0.999978, 0, 380.0, 0, 1.1, 0.9 ],
[144, 1, 59.321823, 11.864365, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[145, 1, 172.568754, 34.513751, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[146, 1, 222.473586, 44.494717, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[147, 1, 136.363208, 27.272642, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[148, 1, 192.43352, 38.486704, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[149, 1, 124.060498, 24.8121, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[150, 1, 161.973861, 32.394772, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[151, 1, 38.168893, 7.633779, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[152, 1, 79.234664, 15.846933, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[153, 1, 141.367527, 28.273505, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[154, 1, 145.212503, 29.042501, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[155, 1, 151.251656, 30.250331, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[156, 1, 0, 0, 0, 0, 0, 0.99999, 0, 380.0, 0, 1.1, 0.9 ],
[157, 1, 0, 0, 0, 0, 0, 1.000399, 0, 380.0, 0, 1.1, 0.9 ],
[158, 1, 39.849774, 7.969955, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[159, 1, 0, 0, 0, 0, 0, 1.000913, 0, 380.0, 0, 1.1, 0.9 ],
[160, 1, 0, 0, 0, 0, 0, 1.000006, 0, 380.0, 0, 1.1, 0.9 ],
[161, 1, 123.710729, 24.742146, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[162, 1, 184.910877, 36.982175, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[163, 1, 36.980429, 7.396086, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[164, 1, 37.12915, 7.42583, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[165, 1, 0, 0, 0, 0, 0, 0.999992, 0, 380.0, 0, 1.1, 0.9 ],
[166, 1, 43.409983, 8.681997, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[167, 1, 61.066919, 12.213384, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[168, 1, 41.677393, 8.335479, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[169, 1, 142.673732, 28.534746, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[170, 1, 107.207278, 21.441456, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[171, 1, 91.501372, 18.300274, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[172, 1, 44.90638, 8.981276, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[173, 1, 42.898885, 8.579777, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[174, 1, 64.375855, 12.875171, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[175, 1, 42.870768, 8.574154, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[176, 1, 149.38871, 29.877742, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[177, 1, 24.360006, 4.872001, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[178, 1, 129.016567, 25.803313, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[179, 1, 47.538152, 9.50763, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[180, 1, 41.787252, 8.35745, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[181, 1, 31.539814, 6.307963, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[182, 1, 1.428768, 0.285754, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[183, 1, 427.675301, 85.53506, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[184, 1, 0, 0, 0, 0, 0, 0.999846, 0, 380.0, 0, 1.1, 0.9 ],
[185, 1, 91.45589, 18.291178, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[186, 1, 49.248522, 9.849704, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[187, 1, 28.80537, 5.761074, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[188, 1, 42.870768, 8.574154, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[189, 1, 157.308849, 31.46177, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[190, 1, 208.070385, 41.614077, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[191, 1, 0, 0, 0, 0, 0, 1.000002, 0, 380.0, 0, 1.1, 0.9 ],
[192, 1, 50.109652, 10.02193, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[193, 1, 42.801615, 8.560323, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[194, 1, 29.54664, 5.909328, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[195, 1, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[196, 1, 41.452214, 8.290443, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[197, 1, 65.67553, 13.135106, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[198, 1, 38.863262, 7.772652, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[199, 1, 50.035156, 10.007031, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[200, 1, 42.871763, 8.574353, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[201, 1, 0, 0, 0, 0, 0, 0.995789, 0, 380.0, 0, 1.1, 0.9 ],
[202, 1, 43.931388, 8.786278, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[203, 1, 5.788354, 1.157671, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[204, 1, 169.655503, 33.931101, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[205, 1, 84.835389, 16.967078, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[206, 1, 40.715058, 8.143012, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[207, 1, 121.069046, 24.213809, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[208, 1, 35.65006, 7.130012, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[209, 1, 49.541125, 9.908225, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[210, 1, 56.913481, 11.382696, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[211, 1, 200.006728, 40.001346, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[212, 1, 50.128865, 10.025773, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[213, 1, 234.992914, 46.998583, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[214, 1, 158.120444, 31.624089, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[215, 1, 334.353571, 66.870714, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[216, 1, 112.739588, 22.547918, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[217, 1, 36.125768, 7.225154, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[218, 1, 110.058409, 22.011682, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[219, 1, 176.877277, 35.375455, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[220, 1, 0, 0, 0, 0, 0, 0.999726, 0, 380.0, 0, 1.1, 0.9 ],
[221, 1, 100.900193, 20.180039, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[222, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[223, 1, 99.998337, 19.999667, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[224, 1, 116.28429, 23.256858, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[225, 1, 208.795114, 41.759023, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[226, 1, 72.938585, 14.587717, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[227, 1, 90.866684, 18.173337, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[228, 1, 89.092008, 17.818402, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[229, 1, 197.14542, 39.429084, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[230, 1, 47.28673, 9.457346, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[231, 1, 0, 0, 0, 0, 0, 1.000811, 0, 380.0, 0, 1.1, 0.9 ],
[232, 1, 0, 0, 0, 0, 0, 0.999978, 0, 380.0, 0, 1.1, 0.9 ],
[233, 1, 0, 0, 0, 0, 0, 0.99979, 0, 380.0, 0, 1.1, 0.9 ],
[234, 1, 168.440592, 33.688118, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[235, 1, 54.774635, 10.954927, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[236, 1, 0, 0, 0, 0, 0, 0.999977, 0, 380.0, 0, 1.1, 0.9 ],
[237, 1, 0.453322, 0.090664, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[238, 1, 61.978496, 12.395699, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[239, 1, 85.631065, 17.126213, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[240, 1, 540.144333, 108.028867, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[241, 1, 399.688043, 79.937609, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[242, 1, 145.533649, 29.10673, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[243, 1, 117.416634, 23.483327, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[244, 1, 139.893197, 27.978639, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[245, 1, 0, 0, 0, 0, 0, 1.001672, 0, 380.0, 0, 1.1, 0.9 ],
[246, 1, 0, 0, 0, 0, 0, 0.999903, 0, 380.0, 0, 1.1, 0.9 ],
[247, 1, 27.761014, 5.552203, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[248, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[249, 1, 0, 0, 0, 0, 0, 0.999997, 0, 380.0, 0, 1.1, 0.9 ],
[250, 1, 0, 0, 0, 0, 0, 0.999995, 0, 380.0, 0, 1.1, 0.9 ],
[251, 1, 68.896541, 13.779308, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[252, 1, 176.68811, 35.337622, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[253, 1, 77.572822, 15.514564, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[254, 1, 24.767715, 4.953543, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[255, 1, 121.805558, 24.361112, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[256, 1, 139.68978, 27.937956, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[257, 1, 67.417378, 13.483476, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[258, 1, 219.705093, 43.941019, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[259, 1, 0, 0, 0, 0, 0, 0.999451, 0, 380.0, 0, 1.1, 0.9 ],
[260, 1, 136.735819, 27.347164, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[261, 1, 0, 0, 0, 0, 0, 1.002014, 0, 380.0, 0, 1.1, 0.9 ],
[262, 1, 0, 0, 0, 0, 0, 0.999835, 0, 380.0, 0, 1.1, 0.9 ],
[263, 1, 196.147354, 39.229471, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[264, 1, 253.92333, 50.784666, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[265, 1, 0, 0, 0, 0, 0, 1.000007, 0, 380.0, 0, 1.1, 0.9 ],
[266, 1, 122.37413, 24.474826, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[267, 1, 154.776724, 30.955345, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[268, 1, 53.822426, 10.764485, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[269, 1, 43.221426, 8.644285, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[270, 1, 0, 0, 0, 0, 0, 1.000024, 0, 380.0, 0, 1.1, 0.9 ],
[271, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[272, 1, 0.881876, 0.176375, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[273, 1, 120.596996, 24.119399, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[274, 1, 234.424673, 46.884935, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[275, 1, 43.885579, 8.777116, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[276, 1, 171.077142, 34.215428, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[277, 1, 0, 0, 0, 0, 0, 0.998348, 0, 380.0, 0, 1.1, 0.9 ],
[278, 1, 133.553677, 26.710735, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[279, 1, 0, 0, 0, 0, 0, 0.998187, 0, 380.0, 0, 1.1, 0.9 ],
[280, 1, 0, 0, 0, 0, 0, 0.999391, 0, 380.0, 0, 1.1, 0.9 ],
[281, 1, 176.408413, 35.281683, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[282, 1, 249.468816, 49.893763, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[283, 1, 99.997934, 19.999587, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[284, 1, 151.701497, 30.340299, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[285, 1, 67.653546, 13.530709, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[286, 1, 141.790904, 28.358181, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[287, 1, 87.147805, 17.429561, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[288, 1, 56.052861, 11.210572, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[289, 1, 88.154894, 17.630979, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[290, 1, 0, 0, 0, 0, 0, 1.004301, 0, 380.0, 0, 1.1, 0.9 ],
[291, 1, 58.013694, 11.602739, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[292, 1, 114.37134, 22.874268, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[293, 1, 100.799787, 20.159957, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[294, 1, 26.86162, 5.372324, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[295, 1, 56.203865, 11.240773, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[296, 1, 159.562905, 31.912581, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[297, 1, 167.702403, 33.540481, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[298, 1, 88.550202, 17.71004, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[299, 1, 85.760282, 17.152056, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[300, 1, 233.634231, 46.726846, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[301, 1, 0, 0, 0, 0, 0, 0.999191, 0, 380.0, 0, 1.1, 0.9 ],
[302, 1, 196.808259, 39.361652, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[303, 1, 101.08643, 20.217286, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[304, 1, 86.802988, 17.360598, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[305, 1, 0, 0, 0, 0, 0, 0.999666, 0, 380.0, 0, 1.1, 0.9 ],
[306, 1, 0, 0, 0, 0, 0, 1.001164, 0, 380.0, 0, 1.1, 0.9 ],
[307, 1, 102.95641, 20.591282, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[308, 1, 126.931537, 25.386307, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[309, 1, 207.677844, 41.535569, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[310, 1, 0, 0, 0, 0, 0, 1.000017, 0, 380.0, 0, 1.1, 0.9 ],
[311, 1, 176.403425, 35.280685, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[312, 1, 79.333529, 15.866706, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[313, 1, 0, 0, 0, 0, 0, 1.000778, 0, 380.0, 0, 1.1, 0.9 ],
[314, 1, 245.724773, 49.144955, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[315, 1, 0, 0, 0, 0, 0, 1.001526, 0, 380.0, 0, 1.1, 0.9 ],
[316, 1, 96.278161, 19.255632, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[317, 1, 129.635017, 25.927003, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[318, 1, 213.038189, 42.607638, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[319, 1, 7.63188, 1.526376, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[320, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[321, 1, 180.535054, 36.107011, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[322, 1, 22.983275, 4.596655, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[323, 1, 2.391214, 0.478243, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[324, 1, 422.708797, 84.541759, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[325, 1, 137.699212, 27.539842, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[326, 1, 11.164225, 2.232845, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[327, 1, 96.075777, 19.215155, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[328, 1, 163.72789, 32.745578, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[329, 1, 246.261342, 49.252268, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[330, 1, 0, 0, 0, 0, 0, 1.001569, 0, 380.0, 0, 1.1, 0.9 ],
[331, 1, 19.552313, 3.910463, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[332, 1, 0, 0, 0, 0, 0, 0.994739, 0, 380.0, 0, 1.1, 0.9 ],
[333, 1, 205.441331, 41.088266, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[334, 1, 0, 0, 0, 0, 0, 0.999232, 0, 380.0, 0, 1.1, 0.9 ],
[335, 1, 209.668377, 41.933675, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[336, 1, 0, 0, 0, 0, 0, 0.9977, 0, 380.0, 0, 1.1, 0.9 ],
[337, 1, 83.399933, 16.679987, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[338, 1, 226.359269, 45.271854, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[339, 1, 140.000077, 28.000015, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[340, 1, 118.367236, 23.673447, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[341, 1, 107.006346, 21.401269, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[342, 1, 185.620799, 37.12416, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[343, 1, 101.834277, 20.366855, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[344, 1, 255.322923, 51.064585, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[345, 1, 279.185563, 55.837113, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[346, 1, 277.160087, 55.432017, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[347, 1, 96.927693, 19.385539, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[348, 1, 253.375374, 50.675075, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[349, 1, 0, 0, 0, 0, 0, 1.001056, 0, 380.0, 0, 1.1, 0.9 ],
[350, 1, 132.924419, 26.584884, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[351, 1, 0, 0, 0, 0, 0, 1.00086, 0, 380.0, 0, 1.1, 0.9 ],
[352, 1, 879.86585, 175.97317, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[353, 1, 2.645171, 0.529034, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[354, 1, 17.971061, 3.594212, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[355, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[356, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[357, 1, 0.045048, 0.00901, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[358, 1, 0, 0, 0, 0, 0, 1.000758, 0, 380.0, 0, 1.1, 0.9 ],
[359, 1, 2.63018, 0.526036, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[360, 1, 0, 0, 0, 0, 0, 1.000731, 0, 380.0, 0, 1.1, 0.9 ],
[361, 1, 67.317091, 13.463418, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[362, 1, 191.888549, 38.37771, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[363, 1, 282.522131, 56.504426, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[364, 1, 66.657207, 13.331441, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[365, 1, 59.828383, 11.965677, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[366, 1, 118.579664, 23.715933, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[367, 1, 57.316484, 11.463297, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[368, 1, 28.223579, 5.644716, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[369, 1, 23.192262, 4.638452, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[370, 1, 68.278681, 13.655736, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[371, 1, 343.548261, 68.709652, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[372, 1, 199.228573, 39.845715, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[373, 1, 134.439585, 26.887917, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[374, 1, 68.938343, 13.787669, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[375, 1, 226.141702, 45.22834, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[376, 1, 248.034855, 49.606971, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[377, 1, 177.489911, 35.497982, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[378, 1, 177.14828, 35.429656, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[379, 1, 61.055424, 12.211085, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[380, 1, 0, 0, 0, 0, 0, 1.001148, 0, 380.0, 0, 1.1, 0.9 ],
[381, 1, 204.173062, 40.834612, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[382, 1, 0, 0, 0, 0, 0, 0.999931, 0, 380.0, 0, 1.1, 0.9 ],
[383, 1, 0, 0, 0, 0, 0, 0.999121, 0, 380.0, 0, 1.1, 0.9 ],
[384, 1, 72.047602, 14.40952, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[385, 1, 90.938214, 18.187643, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[386, 1, 73.066128, 14.613226, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[387, 1, 148.802155, 29.760431, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[388, 1, 799.065393, 159.813079, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[389, 1, 0, 0, 0, 0, 0, 0.999939, 0, 380.0, 0, 1.1, 0.9 ],
[390, 1, 65.976961, 13.195392, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[391, 1, 75.153385, 15.030677, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[392, 1, 144.21859, 28.843718, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[393, 1, 180.102036, 36.020407, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[394, 1, 64.777524, 12.955505, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[395, 1, 89.777647, 17.955529, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[396, 1, 63.588588, 12.717718, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[397, 1, 509.910433, 101.982087, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[398, 1, 220.853223, 44.170645, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[399, 1, 94.099558, 18.819912, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[400, 1, 50.134668, 10.026934, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[401, 1, 0, 0, 0, 0, 0, 1.000621, 0, 380.0, 0, 1.1, 0.9 ],
[402, 1, 0, 0, 0, 0, 0, 1.000413, 0, 380.0, 0, 1.1, 0.9 ],
[403, 1, 24.893029, 4.978606, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[404, 1, 87.69968, 17.539936, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[405, 1, 661.168885, 132.233777, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[406, 1, 50.094976, 10.018995, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[407, 1, 99.164103, 19.832821, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[408, 1, 286.726974, 57.345395, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[409, 1, 0, 0, 0, 0, 0, 0.999952, 0, 380.0, 0, 1.1, 0.9 ],
[410, 1, 37.122513, 7.424503, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[411, 1, 35.100857, 7.020171, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[412, 1, 2.465451, 0.49309, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[413, 1, 123.079761, 24.615952, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[414, 1, 10.450803, 2.090161, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[415, 1, 0, 0, 0, 0, 0, 1.000224, 0, 380.0, 0, 1.1, 0.9 ],
[416, 1, 148.830435, 29.766087, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[417, 1, 5.823451, 1.16469, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[418, 1, 121.357209, 24.271442, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[419, 1, 64.864566, 12.972913, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[420, 1, 65.305436, 13.061087, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[421, 1, 94.070816, 18.814163, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[422, 1, 68.919432, 13.783886, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[423, 1, 144.746037, 28.949207, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[424, 1, 10.435816, 2.087163, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[425, 1, 85.704384, 17.140877, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[426, 1, 7.100872, 1.420174, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[427, 1, 59.675844, 11.935169, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[428, 1, 26.756796, 5.351359, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[429, 1, 301.944101, 60.38882, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[430, 1, 160.835237, 32.167047, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[431, 1, 107.552993, 21.510599, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[432, 1, 125.722851, 25.14457, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[433, 1, 64.26617, 12.853234, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[434, 1, 33.447245, 6.689449, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[435, 1, 133.767923, 26.753585, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[436, 1, 71.41645, 14.28329, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[437, 1, 16.264347, 3.252869, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[438, 1, 43.649054, 8.729811, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[439, 1, 81.268896, 16.253779, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[440, 1, 68.680522, 13.736104, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[441, 1, 52.652822, 10.530564, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[442, 1, 69.677507, 13.935501, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[443, 1, 151.067394, 30.213479, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[444, 1, 0, 0, 0, 0, 0, 0.999997, 0, 380.0, 0, 1.1, 0.9 ],
[445, 1, 68.643278, 13.728656, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[446, 1, 31.829272, 6.365854, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[447, 1, 60.513666, 12.102733, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[448, 1, 44.471399, 8.89428, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[449, 1, 224.239852, 44.84797, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[450, 1, 137.224089, 27.444818, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[451, 1, 58.63653, 11.727306, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[452, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[453, 1, 39.297852, 7.85957, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[454, 1, 27.416773, 5.483355, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[455, 1, 44.700742, 8.940148, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[456, 1, 44.700742, 8.940148, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[457, 1, 137.085966, 27.417193, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[458, 1, 130.386039, 26.077208, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[459, 1, 158.68466, 31.736932, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[460, 1, 208.544338, 41.708868, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[461, 1, 216.931333, 43.386267, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[462, 1, 66.360419, 13.272084, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[463, 1, 34.003494, 6.800699, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[464, 1, 34.044597, 6.808919, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[465, 1, 54.991329, 10.998266, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[466, 1, 44.646002, 8.9292, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[467, 1, 41.200867, 8.240173, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[468, 1, 67.553137, 13.510627, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[469, 1, 41.861325, 8.372265, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[470, 1, 106.604729, 21.320946, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[471, 1, 104.961969, 20.992394, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[472, 1, 36.712533, 7.342507, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[473, 1, 67.412964, 13.482593, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[474, 1, 34.818091, 6.963618, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[475, 1, 34.168679, 6.833736, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[476, 1, 38.616228, 7.723246, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[477, 1, 62.31824, 12.463648, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[478, 1, 78.283068, 15.656614, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[479, 1, 141.866305, 28.373261, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[480, 1, 62.182572, 12.436514, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[481, 1, 54.002224, 10.800445, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[482, 1, 61.317202, 12.26344, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[483, 1, 52.145787, 10.429157, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[484, 1, 40.87976, 8.175952, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[485, 1, 61.063542, 12.212708, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[486, 1, 561.754759, 112.350952, 0, 0, 0, 0.999618, 0, 220.0, 0, 1.1, 0.9 ],
[487, 1, 142.346059, 28.469212, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[488, 1, 410.163442, 82.032688, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[489, 1, 107.953851, 21.59077, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[490, 1, 33.591213, 6.718243, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[491, 1, 46.188348, 9.23767, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[492, 1, 72.026592, 14.405318, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[493, 1, 92.833655, 18.566731, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[494, 1, 126.878139, 25.375628, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[495, 1, 99.875772, 19.975154, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[496, 1, 7.074367, 1.414873, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[497, 1, 884.647456, 176.929491, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[498, 1, 41.489168, 8.297834, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[499, 1, 57.912081, 11.582416, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[500, 1, 31.706183, 6.341237, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[501, 1, 53.641395, 10.728279, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[502, 1, 211.711478, 42.342296, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[503, 1, 64.838967, 12.967793, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[504, 1, 42.459601, 8.49192, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[505, 1, 301.156435, 60.231287, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[506, 1, 94.529299, 18.90586, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[507, 1, 89.917368, 17.983474, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[508, 1, 130.720174, 26.144035, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[509, 1, 172.263261, 34.452652, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[510, 1, 108.828993, 21.765799, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[511, 1, 94.932131, 18.986426, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[512, 1, 62.708534, 12.541707, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[513, 1, 34.54571, 6.909142, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[514, 1, 85.98093, 17.196186, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[515, 1, 76.700097, 15.340019, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[516, 1, 85.809361, 17.161872, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[517, 1, 40.306711, 8.061342, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[518, 1, 227.009948, 45.40199, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[519, 1, 22.341935, 4.468387, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[520, 1, 90.20304, 18.040608, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[521, 1, 81.483977, 16.296795, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[522, 1, 69.76724, 13.953448, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[523, 1, 37.554912, 7.510982, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[524, 1, 109.002803, 21.800561, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[525, 1, 129.859259, 25.971852, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[526, 1, 39.370885, 7.874177, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[527, 1, 43.226465, 8.645293, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[528, 1, 94.345802, 18.86916, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[529, 1, 120.937347, 24.187469, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[530, 1, 51.248308, 10.249662, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[531, 1, 52.105989, 10.421198, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[532, 1, 50.012666, 10.002533, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[533, 1, 44.817384, 8.963477, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[534, 1, 123.631426, 24.726285, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[535, 1, 154.778611, 30.955722, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[536, 1, 121.998901, 24.39978, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[537, 1, 40.584007, 8.116801, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[538, 1, 30.337835, 6.067567, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[539, 1, 32.190308, 6.438062, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[540, 1, 28.985958, 5.797192, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[541, 1, 74.873232, 14.974646, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[542, 1, 102.852677, 20.570535, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[543, 1, 56.177626, 11.235525, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[544, 1, 104.631618, 20.926324, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[545, 1, 225.289033, 45.057807, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[546, 1, 112.918266, 22.583653, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[547, 1, 145.954278, 29.190856, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[548, 1, 47.246003, 9.449201, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[549, 1, 40.399373, 8.079875, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[550, 1, 33.336353, 6.667271, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[551, 1, 32.13544, 6.427088, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[552, 1, 159.580975, 31.916195, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[553, 1, 1.104053, 0.220811, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[554, 1, 161.672187, 32.334437, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[555, 1, 61.598893, 12.319779, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[556, 1, 95.295538, 19.059108, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[557, 1, 202.468734, 40.493747, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[558, 1, 119.387449, 23.87749, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[559, 1, 63.895013, 12.779003, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[560, 1, 99.819127, 19.963825, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[561, 1, 54.737895, 10.947579, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[562, 1, 149.539829, 29.907966, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[563, 1, 105.138687, 21.027737, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[564, 1, 207.596629, 41.519326, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[565, 1, 156.641943, 31.328389, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[566, 1, 0.251601, 0.05032, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[567, 1, 254.628504, 50.925701, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[568, 1, 235.469759, 47.093952, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[569, 1, 165.678176, 33.135635, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[570, 1, 258.653467, 51.730693, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[571, 1, 190.440365, 38.088073, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[572, 1, 335.905008, 67.181002, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[573, 1, 97.777544, 19.555509, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[574, 1, 186.30356, 37.260712, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[575, 1, 3.500978, 0.700196, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[576, 1, 226.543879, 45.308776, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[577, 1, 249.741009, 49.948202, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[578, 1, 238.444353, 47.688871, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[579, 1, 86.991008, 17.398202, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[580, 1, 18.110233, 3.622047, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[581, 1, 0.104063, 0.020813, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[582, 1, 65.522917, 13.104583, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[583, 1, 75.152378, 15.030476, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[584, 1, 43.118835, 8.623767, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[585, 1, 74.859603, 14.971921, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ]
])
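# The column layouts here appear to follow the standard PYPOWER/MATPOWER case
# format (an assumption inferred from the 13- and 21-column row widths, not
# stated in the data itself):
#   bus: [bus_i, type, Pd, Qd, Gs, Bs, area, Vm, Va, baseKV, zone, Vmax, Vmin]
#   gen: [bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
#         Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf]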
ppc["gen"] = array([
[586, 0.0, 0, 9999, -9999, 1.0, 100, 1, 272.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[589, 63.1, 0, 9999, -9999, 1.0, 100, 1, 63.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[590, 38.0, 0, 9999, -9999, 1.0, 100, 1, 38.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[593, 11.1, 0, 9999, -9999, 1.0, 100, 1, 11.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[595, 1280.540535, 0, 9999, -9999, 1.0, 100, 1, 4730.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[598, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[599, 9.3, 0, 9999, -9999, 1.0, 100, 1, 9.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[602, 24.6, 0, 9999, -9999, 1.0, 100, 1, 24.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[603, 1138.281464, 0, 9999, -9999, 1.0, 100, 1, 3455.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[607, 1800.0, 0, 9999, -9999, 1.0, 100, 1, 1800.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[608, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[609, 36.4, 0, 9999, -9999, 1.0, 100, 1, 36.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[612, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[614, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[616, 29.0, 0, 9999, -9999, 1.0, 100, 1, 29.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[617, 137.0, 0, 9999, -9999, 1.0, 100, 1, 137.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[618, 33.4, 0, 9999, -9999, 1.0, 100, 1, 33.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[619, 118.0, 0, 9999, -9999, 1.0, 100, 1, 118.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[624, 27.0, 0, 9999, -9999, 1.0, 100, 1, 27.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[629, 75.3, 0, 9999, -9999, 1.0, 100, 1, 75.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[632, 45.1, 0, 9999, -9999, 1.0, 100, 1, 45.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[637, 53.7, 0, 9999, -9999, 1.0, 100, 1, 53.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[638, 128.7, 0, 9999, -9999, 1.0, 100, 1, 128.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[640, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[641, 12.6, 0, 9999, -9999, 1.0, 100, 1, 12.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[642, 28.9, 0, 9999, -9999, 1.0, 100, 1, 28.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[643, 857.0, 0, 9999, -9999, 1.0, 100, 1, 857.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[647, 14.0, 0, 9999, -9999, 1.0, 100, 1, 14.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[652, 46.9, 0, 9999, -9999, 1.0, 100, 1, 46.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[655, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[663, 15.0, 0, 9999, -9999, 1.0, 100, 1, 15.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[666, 28.9, 0, 9999, -9999, 1.0, 100, 1, 28.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[672, 33.1, 0, 9999, -9999, 1.0, 100, 1, 33.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[676, 370.0, 0, 9999, -9999, 1.0, 100, 1, 370.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[681, 40.1, 0, 9999, -9999, 1.0, 100, 1, 40.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[683, 27.5, 0, 9999, -9999, 1.0, 100, 1, 27.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[687, 1329.0, 0, 9999, -9999, 1.0, 100, 1, 1329.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[696, 97.960552, 0, 9999, -9999, 1.0, 100, 1, 721.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[697, 11.6, 0, 9999, -9999, 1.0, 100, 1, 11.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[698, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[702, 73.4, 0, 9999, -9999, 1.0, 100, 1, 73.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[705, 17.0, 0, 9999, -9999, 1.0, 100, 1, 17.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[707, 34.0, 0, 9999, -9999, 1.0, 100, 1, 34.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[713, 13.4, 0, 9999, -9999, 1.0, 100, 1, 13.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[714, 15.0, 0, 9999, -9999, 1.0, 100, 1, 15.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[716, 0.1, 0, 9999, -9999, 1.0, 100, 1, 0.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[717, 11.0, 0, 9999, -9999, 1.0, 100, 1, 11.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[719, 1276.753631, 0, 9999, -9999, 1.0, 100, 1, 1958.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[722, 20.7, 0, 9999, -9999, 1.0, 100, 1, 20.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[724, 12.1, 0, 9999, -9999, 1.0, 100, 1, 12.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[727, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[728, 510.0, 0, 9999, -9999, 1.0, 100, 1, 510.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[730, 633.2, 0, 9999, -9999, 1.0, 100, 1, 633.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[732, 14.6, 0, 9999, -9999, 1.0, 100, 1, 14.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[735, 84.8, 0, 9999, -9999, 1.0, 100, 1, 84.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[738, 138.5, 0, 9999, -9999, 1.0, 100, 1, 138.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[741, 214.0, 0, 9999, -9999, 1.0, 100, 1, 214.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[742, 9.0, 0, 9999, -9999, 1.0, 100, 1, 9.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[747, 12.5, 0, 9999, -9999, 1.0, 100, 1, 12.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[748, 110.0, 0, 9999, -9999, 1.0, 100, 1, 110.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[749, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[750, 90.8, 0, 9999, -9999, 1.0, 100, 1, 90.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[758, 18.5, 0, 9999, -9999, 1.0, 100, 1, 18.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[761, 15.7, 0, 9999, -9999, 1.0, 100, 1, 15.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[762, 621.39271, 0, 9999, -9999, 1.0, 100, 1, 1105.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[765, 59.0, 0, 9999, -9999, 1.0, 100, 1, 59.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[767, 11.2, 0, 9999, -9999, 1.0, 100, 1, 11.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[774, 33.5, 0, 9999, -9999, 1.0, 100, 1, 33.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[777, 79.0, 0, 9999, -9999, 1.0, 100, 1, 79.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[778, 14.7, 0, 9999, -9999, 1.0, 100, 1, 14.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[781, 956.625433, 0, 9999, -9999, 1.0, 100, 1, 1310.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[784, 961.754454, 0, 9999, -9999, 1.0, 100, 1, 1275.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[785, 3.0, 0, 9999, -9999, 1.0, 100, 1, 3.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[787, 778.0, 0, 9999, -9999, 1.0, 100, 1, 778.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[788, 875.0, 0, 9999, -9999, 1.0, 100, 1, 875.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[789, 77.4, 0, 9999, -9999, 1.0, 100, 1, 77.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[791, 10.0, 0, 9999, -9999, 1.0, 100, 1, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[792, 62.7, 0, 9999, -9999, 1.0, 100, 1, 62.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[795, 13.6, 0, 9999, -9999, 1.0, 100, 1, 13.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[800, 36.5, 0, 9999, -9999, 1.0, 100, 1, 36.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[801, 50.0, 0, 9999, -9999, 1.0, 100, 1, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[802, 500.0, 0, 9999, -9999, 1.0, 100, 1, 500.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[805, 732.670179, 0, 9999, -9999, 1.0, 100, 1, 1410.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[806, 35.8, 0, 9999, -9999, 1.0, 100, 1, 35.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[808, 217.5, 0, 9999, -9999, 1.0, 100, 1, 217.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[809, 12.5, 0, 9999, -9999, 1.0, 100, 1, 12.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[811, 25.2, 0, 9999, -9999, 1.0, 100, 1, 25.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[814, 89.0, 0, 9999, -9999, 1.0, 100, 1, 89.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[816, 80.1, 0, 9999, -9999, 1.0, 100, 1, 80.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[817, 54.0, 0, 9999, -9999, 1.0, 100, 1, 54.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[821, 82.5, 0, 9999, -9999, 1.0, 100, 1, 82.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[822, 134.0, 0, 9999, -9999, 1.0, 100, 1, 134.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[826, 58.0, 0, 9999, -9999, 1.0, 100, 1, 58.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[834, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[835, 63.7, 0, 9999, -9999, 1.0, 100, 1, 63.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[836, 25.5, 0, 9999, -9999, 1.0, 100, 1, 25.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[837, 472.0, 0, 9999, -9999, 1.0, 100, 1, 472.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[839, 73.3, 0, 9999, -9999, 1.0, 100, 1, 73.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[841, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[843, 333.0, 0, 9999, -9999, 1.0, 100, 1, 333.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[844, 40.0, 0, 9999, -9999, 1.0, 100, 1, 40.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[850, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[851, 79.5, 0, 9999, -9999, 1.0, 100, 1, 79.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[853, 11.6, 0, 9999, -9999, 1.0, 100, 1, 11.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[856, 36.0, 0, 9999, -9999, 1.0, 100, 1, 36.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[857, 1402.0, 0, 9999, -9999, 1.0, 100, 1, 1402.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[858, 56.8, 0, 9999, -9999, 1.0, 100, 1, 56.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[860, 25.0, 0, 9999, -9999, 1.0, 100, 1, 25.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[865, 11.0, 0, 9999, -9999, 1.0, 100, 1, 11.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[867, 769.0, 0, 9999, -9999, 1.0, 100, 1, 769.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[870, 58.4, 0, 9999, -9999, 1.0, 100, 1, 58.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[872, 22.5, 0, 9999, -9999, 1.0, 100, 1, 22.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[873, 122.0, 0, 9999, -9999, 1.0, 100, 1, 122.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[874, 20.7, 0, 9999, -9999, 1.0, 100, 1, 20.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[875, 24.4, 0, 9999, -9999, 1.0, 100, 1, 24.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[877, 24.8, 0, 9999, -9999, 1.0, 100, 1, 24.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[882, 17.4, 0, 9999, -9999, 1.0, 100, 1, 17.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[883, 18.0, 0, 9999, -9999, 1.0, 100, 1, 18.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[885, 69.386873, 0, 9999, -9999, 1.0, 100, 1, 490.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[886, 2572.0, 0, 9999, -9999, 1.0, 100, 1, 2572.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[889, 9.5, 0, 9999, -9999, 1.0, 100, 1, 9.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[890, 48.0, 0, 9999, -9999, 1.0, 100, 1, 48.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[895, 19.0, 0, 9999, -9999, 1.0, 100, 1, 19.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[896, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[898, 84.6, 0, 9999, -9999, 1.0, 100, 1, 84.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[900, 112.6, 0, 9999, -9999, 1.0, 100, 1, 112.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[902, 19.5, 0, 9999, -9999, 1.0, 100, 1, 19.5, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
LA12_0 = self.input.LA(1)
if (LA12_0 == DIGIT) :
alt12 = 1
if alt12 == 1:
# bsdl.g:110:28: DIGIT
pass
self.match(self.input, DIGIT, self.FOLLOW_DIGIT_in_port_def927)
else:
if cnt12 >= 1:
break #loop12
eee = EarlyExitException(12, self.input)
raise eee
cnt12 += 1
self.match(self.input, TO, self.FOLLOW_TO_in_port_def930)
# bsdl.g:110:38: ( DIGIT )+
cnt13 = 0
while True: #loop13
alt13 = 2
LA13_0 = self.input.LA(1)
if (LA13_0 == DIGIT) :
alt13 = 1
if alt13 == 1:
# bsdl.g:110:38: DIGIT
pass
self.match(self.input, DIGIT, self.FOLLOW_DIGIT_in_port_def932)
else:
if cnt13 >= 1:
break #loop13
eee = EarlyExitException(13, self.input)
raise eee
cnt13 += 1
self.match(self.input, CPAREN, self.FOLLOW_CPAREN_in_port_def935)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return value
# $ANTLR end "port_def"
class identifier_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
self.value = None
# $ANTLR start "identifier"
# bsdl.g:112:1: identifier returns [value] : ( FULLCASE_WORD | WORD ) ( FULLCASE_WORD | WORD | DIGIT )* ( '_' ( FULLCASE_WORD | WORD | DIGIT )+ )* ;
def identifier(self, ):
retval = self.identifier_return()
retval.start = self.input.LT(1)
try:
try:
# bsdl.g:113:5: ( ( FULLCASE_WORD | WORD ) ( FULLCASE_WORD | WORD | DIGIT )* ( '_' ( FULLCASE_WORD | WORD | DIGIT )+ )* )
# bsdl.g:113:7: ( FULLCASE_WORD | WORD ) ( FULLCASE_WORD | WORD | DIGIT )* ( '_' ( FULLCASE_WORD | WORD | DIGIT )+ )*
pass
if (FULLCASE_WORD <= self.input.LA(1) <= WORD):
self.input.consume()
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
# bsdl.g:113:28: ( FULLCASE_WORD | WORD | DIGIT )*
while True: #loop15
alt15 = 2
LA15_0 = self.input.LA(1)
if (LA15_0 == DIGIT or (FULLCASE_WORD <= LA15_0 <= WORD)) :
alt15 = 1
if alt15 == 1:
# bsdl.g:
pass
if self.input.LA(1) == DIGIT or (FULLCASE_WORD <= self.input.LA(1) <= WORD):
self.input.consume()
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
else:
break #loop15
# bsdl.g:113:56: ( '_' ( FULLCASE_WORD | WORD | DIGIT )+ )*
while True: #loop17
alt17 = 2
LA17_0 = self.input.LA(1)
if (LA17_0 == USCORE) :
alt17 = 1
if alt17 == 1:
# bsdl.g:113:57: '_' ( FULLCASE_WORD | WORD | DIGIT )+
pass
self.match(self.input, USCORE, self.FOLLOW_USCORE_in_identifier968)
# bsdl.g:113:61: ( FULLCASE_WORD | WORD | DIGIT )+
cnt16 = 0
while True: #loop16
alt16 = 2
LA16_0 = self.input.LA(1)
if (LA16_0 == DIGIT or (FULLCASE_WORD <= LA16_0 <= WORD)) :
alt16 = 1
if alt16 == 1:
# bsdl.g:
pass
if self.input.LA(1) == DIGIT or (FULLCASE_WORD <= self.input.LA(1) <= WORD):
self.input.consume()
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
else:
if cnt16 >= 1:
break #loop16
eee = EarlyExitException(16, self.input)
raise eee
cnt16 += 1
else:
break #loop17
#action start
retval.value = self.input.toString(retval.start, self.input.LT(-1)).upper()
#action end
retval.stop = self.input.LT(-1)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return retval
# $ANTLR end "identifier"
# $ANTLR start "portmode"
# bsdl.g:116:1: portmode returns [value] : ( IN | OUT | INOUT | BUFFER | LINKAGE | BUS );
def portmode(self, ):
value = None
try:
try:
# bsdl.g:117:5: ( IN | OUT | INOUT | BUFFER | LINKAGE | BUS )
# bsdl.g:
pass
if (IN <= self.input.LA(1) <= BUS):
self.input.consume()
self._state.errorRecovery = False
else:
mse = MismatchedSetException(None, self.input)
raise mse
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return value
# $ANTLR end "portmode"
# $ANTLR start "string"
# bsdl.g:119:1: string returns [value] : (s= STRING ANDSIGN )* s1= STRING ;
def string(self, ):
value = None
s = None
s1 = None
try:
try:
# bsdl.g:120:5: ( (s= STRING ANDSIGN )* s1= STRING )
# bsdl.g:120:7: (s= STRING ANDSIGN )* s1= STRING
pass
#action start
str_parts = []
#action end
# bsdl.g:121:7: (s= STRING ANDSIGN )*
while True: #loop18
alt18 = 2
LA18_0 = self.input.LA(1)
if (LA18_0 == STRING) :
LA18_1 = self.input.LA(2)
if (LA18_1 == ANDSIGN) :
alt18 = 1
if alt18 == 1:
# bsdl.g:121:8: s= STRING ANDSIGN
pass
s=self.match(self.input, STRING, self.FOLLOW_STRING_in_string1039)
self.match(self.input, ANDSIGN, self.FOLLOW_ANDSIGN_in_string1041)
#action start
str_parts.append(s.text[1:-1])
#action end
else:
break #loop18
s1=self.match(self.input, STRING, self.FOLLOW_STRING_in_string1058)
#action start
str_parts.append(s1.text[1:-1])
#action end
#action start
value = "".join(str_parts)
#action end
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return value
# $ANTLR end "string"
class scinot_number_return(ParserRuleReturnScope):
def __init__(self):
ParserRuleReturnScope.__init__(self)
# $ANTLR start "scinot_number"
# bsdl.g:126:1: scinot_number : ( DIGIT )* DOT ( DIGIT )* 'e' ( '+' )? ( DIGIT )* ;
def scinot_number(self, ):
retval = self.scinot_number_return()
retval.start = self.input.LT(1)
try:
try:
# bsdl.g:127:5: ( ( DIGIT )* DOT ( DIGIT )* 'e' ( '+' )? ( DIGIT )* )
# bsdl.g:127:7: ( DIGIT )* DOT ( DIGIT )* 'e' ( '+' )? ( DIGIT )*
pass
# bsdl.g:127:7: ( DIGIT )*
while True: #loop19
alt19 = 2
LA19_0 = self.input.LA(1)
if (LA19_0 == DIGIT) :
alt19 = 1
if alt19 == 1:
# bsdl.g:127:7: DIGIT
pass
self.match(self.input, DIGIT, self.FOLLOW_DIGIT_in_scinot_number1087)
else:
break #loop19
self.match(self.input, DOT, self.FOLLOW_DOT_in_scinot_number1090)
# bsdl.g:127:18: ( DIGIT )*
while True: #loop20
alt20 = 2
LA20_0 = self.input.LA(1)
if (LA20_0 == DIGIT) :
alt20 = 1
if alt20 == 1:
# bsdl.g:127:18: DIGIT
pass
self.match(self.input, DIGIT, self.FOLLOW_DIGIT_in_scinot_number1092)
else:
break #loop20
self.match(self.input, 123, self.FOLLOW_123_in_scinot_number1095)
# bsdl.g:127:29: ( '+' )?
alt21 = 2
LA21_0 = self.input.LA(1)
if (LA21_0 == 124) :
alt21 = 1
if alt21 == 1:
# bsdl.g:127:29: '+'
pass
self.match(self.input, 124, self.FOLLOW_124_in_scinot_number1097)
# bsdl.g:127:34: ( DIGIT )*
while True: #loop22
alt22 = 2
LA22_0 = self.input.LA(1)
if (LA22_0 == DIGIT) :
alt22 = 1
if alt22 == 1:
# bsdl.g:127:34: DIGIT
pass
self.match(self.input, DIGIT, self.FOLLOW_DIGIT_in_scinot_number1100)
else:
break #loop22
retval.stop = self.input.LT(-1)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
finally:
pass
return retval
# $ANTLR end "scinot_number"
# Delegated rules
# lookup tables for DFA #10
DFA10_eot = DFA.unpack(
u"\21\uffff"
)
DFA10_eof = DFA.unpack(
u"\21\uffff"
)
DFA10_min = DFA.unpack(
u"\1\35\2\12\1\27\1\35\1\37\1\12\1\32\1\6\1\11\2\uffff\3\27\1\14"
u"\1\6"
)
DFA10_max = DFA.unpack(
u"\1\36\2\51\2\36\1\44\1\51\1\33\1\14\1\11\2\uffff\1\27\1\34\2\27"
u"\1\14"
)
DFA10_accept = DFA.unpack(
u"\12\uffff\1\2\1\1\5\uffff"
)
DFA10_special = DFA.unpack(
u"\21\uffff"
)
DFA10_transition = [
DFA.unpack(u"\2\1"),
DFA.unpack(u"\1\5\12\uffff\1\4\1\uffff\1\2\5\uffff\2\2\12\uffff"
u"\1\3"),
DFA.unpack(u"\1\5\12\uffff\1\4\1\uffff\1\2\5\uffff\2\2\12\uffff"
u"\1\3"),
DFA.unpack(u"\1\6\5\uffff\2\6"),
DFA.unpack(u"\2\1"),
DFA.unpack(u"\6\7"),
DFA.unpack(u"\1\5\12\uffff\1\4\1\uffff\1\6\5\uffff\2\6\12\uffff"
u"\1\3"),
DFA.unpack(u"\1\10\1\11"),
DFA.unpack(u"\1\13\5\uffff\1\12"),
DFA.unpack(u"\1\14"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\15"),
DFA.unpack(u"\1\15\4\uffff\1\16"),
DFA.unpack(u"\1\17"),
DFA.unpack(u"\1\20\12\uffff\1\17"),
DFA.unpack(u"\1\13\5\uffff\1\12")
]
# class definition for DFA #10
DFA10 = DFA
# lookup tables for DFA #11
DFA11_eot = DFA.unpack(
u"\7\uffff"
)
DFA11_eof = DFA.unpack(
u"\7\uffff"
)
DFA11_min = DFA.unpack(
u"\1\35\2\12\1\27\2\uffff\1\12"
)
DFA11_max = DFA.unpack(
u"\1\36\2\51\1\36\2\uffff\1\51"
)
DFA11_accept = DFA.unpack(
u"\4\uffff\1\2\1\1\1\uffff"
)
DFA11_special = DFA.unpack(
u"\7\uffff"
)
DFA11_transition = [
DFA.unpack(u"\2\1"),
DFA.unpack(u"\1\4\12\uffff\1\5\1\uffff\1\2\5\uffff\2\2\12\uffff"
u"\1\3"),
DFA.unpack(u"\1\4\12\uffff\1\5\1\uffff\1\2\5\uffff\2\2\12\uffff"
u"\1\3"),
DFA.unpack(u"\1\6\5\uffff\2\6"),
DFA.unpack(u""),
DFA.unpack(u""),
DFA.unpack(u"\1\4\12\uffff\1\5\1\uffff\1\6\5\uffff\2\6\12\uffff"
u"\1\3")
]
# class definition for DFA #11
DFA11 = DFA
FOLLOW_entity_in_eval61 = frozenset([])
FOLLOW_EOF_in_eval73 = frozenset([1])
FOLLOW_ENTITY_in_entity97 = frozenset([29, 30])
FOLLOW_identifier_in_entity101 = frozenset([5])
FOLLOW_IS_in_entity103 = frozenset([8])
FOLLOW_generic_in_entity130 = frozenset([6])
FOLLOW_SCOLON_in_entity132 = frozenset([8, 25])
FOLLOW_port_list_in_entity143 = frozenset([6])
FOLLOW_SCOLON_in_entity145 = frozenset([7, 13, 16, 24])
FOLLOW_use_in_entity164 = frozenset([6])
FOLLOW_SCOLON_in_entity166 = frozenset([7, 13, 16, 24])
FOLLOW_attribute_in_entity208 = frozenset([6])
FOLLOW_constant_in_entity225 = frozenset([6])
FOLLOW_SCOLON_in_entity238 = frozenset([7, 16, 24])
FOLLOW_END_in_entity253 = frozenset([29, 30])
FOLLOW_identifier_in_entity255 = frozenset([6])
FOLLOW_SCOLON_in_entity257 = frozenset([1])
FOLLOW_GENERIC_in_generic279 = frozenset([9])
FOLLOW_OPAREN_in_generic281 = frozenset([29, 30])
FOLLOW_identifier_in_generic295 = frozenset([10])
FOLLOW_COLON_in_generic297 = frozenset([29, 30])
FOLLOW_identifier_in_generic299 = frozenset([10])
FOLLOW_COLON_in_generic313 = frozenset([11])
FOLLOW_EQUAL_in_generic315 = frozenset([37])
FOLLOW_string_in_generic319 = frozenset([12])
FOLLOW_CPAREN_in_generic329 = frozenset([1])
FOLLOW_USE_in_use349 = frozenset([29, 30])
FOLLOW_identifier_in_use353 = frozenset([14])
FOLLOW_DOT_in_use355 = frozenset([15])
FOLLOW_ALL_in_use357 = frozenset([1])
FOLLOW_ATTRIBUTE_in_attribute388 = frozenset([29, 30])
FOLLOW_identifier_in_attribute401 = frozenset([17])
FOLLOW_OF_in_attribute403 = frozenset([29, 30])
FOLLOW_identifier_in_attribute407 = frozenset([10])
FOLLOW_COLON_in_attribute409 = frozenset([4, 18])
FOLLOW_general_attribute_assignment_in_attribute435 = frozenset([1])
FOLLOW_ENTITY_in_general_attribute_assignment459 = frozenset([5])
FOLLOW_IS_in_general_attribute_assignment461 = frozenset([23, 29, 30, 37])
FOLLOW_identifier_in_general_attribute_assignment496 = frozenset([1])
FOLLOW_string_in_general_attribute_assignment519 = frozenset([1])
FOLLOW_number_in_general_attribute_assignment542 = frozenset([1])
FOLLOW_SIGNAL_in_general_attribute_assignment576 = frozenset([5])
FOLLOW_IS_in_general_attribute_assignment578 = frozenset([9, 19, 20])
FOLLOW_TRUE_in_general_attribute_assignment612 = frozenset([1])
FOLLOW_FALSE_in_general_attribute_assignment616 = frozenset([1])
FOLLOW_OPAREN_in_general_attribute_assignment639 = frozenset([14, 23])
FOLLOW_scinot_number_in_general_attribute_assignment641 = frozenset([21])
FOLLOW_COMMA_in_general_attribute_assignment665 = frozenset([22])
FOLLOW_BOTH_in_general_attribute_assignment667 = frozenset([12])
FOLLOW_CPAREN_in_general_attribute_assignment669 = frozenset([1])
FOLLOW_DIGIT_in_number713 = frozenset([1, 23])
FOLLOW_CONSTANT_in_constant737 = frozenset([29, 30])
FOLLOW_identifier_in_constant741 = frozenset([10])
FOLLOW_COLON_in_constant745 = frozenset([29, 30])
FOLLOW_identifier_in_constant747 = frozenset([10])
FOLLOW_COLON_in_constant758 = frozenset([11])
FOLLOW_EQUAL_in_constant760 = frozenset([37])
FOLLOW_string_in_constant764 = frozenset([1])
FOLLOW_PORT_in_port_list797 = frozenset([9])
FOLLOW_OPAREN_in_port_list799 = frozenset([29, 30])
FOLLOW_port_def_in_port_list813 = frozenset([6])
FOLLOW_SCOLON_in_port_list817 = frozenset([29, 30])
FOLLOW_port_def_in_port_list833 = frozenset([12])
FOLLOW_CPAREN_in_port_list846 = frozenset([1])
FOLLOW_identifier_in_port_def874 = frozenset([21])
FOLLOW_COMMA_in_port_def878 = frozenset([29, 30])
| |
paths. This is calculated using
the value of ``platform``. Ex. ``local_path_mac``.
:ivar str py_version: Simple version of Python executable as a string. Eg. ``2.7``.
:ivar str ssl_version: Version of OpenSSL installed. Eg. ``OpenSSL 1.0.2g 1 Mar 2016``. This
info is only available in Python 2.7+ if the ssl module was imported successfully.
Defaults to ``unknown``
"""
def __init__(self):
system = sys.platform.lower()
if system == "darwin":
self.platform = "mac"
elif system.startswith("linux"):
self.platform = "linux"
elif system == "win32":
self.platform = "windows"
else:
self.platform = None
if self.platform:
self.local_path_field = "local_path_%s" % (self.platform)
else:
self.local_path_field = None
self.py_version = ".".join(str(x) for x in sys.version_info[:2])
# extract the OpenSSL version if we can. The version is only available in Python 2.7 and
# only if we successfully imported ssl
self.ssl_version = "unknown"
try:
self.ssl_version = ssl.OPENSSL_VERSION
except (AttributeError, NameError):
pass
def __str__(self):
return "ClientCapabilities: platform %s, local_path_field %s, "\
"py_verison %s, ssl version %s" % (self.platform, self.local_path_field,
self.py_version, self.ssl_version)
class _Config(object):
"""
Container for the client configuration.
"""
def __init__(self, sg):
"""
:param sg: Shotgun connection.
"""
self._sg = sg
self.max_rpc_attempts = 3
# rpc_attempt_interval stores the number of milliseconds to wait between
# request retries. By default, this will be 3000 milliseconds. You can
# override this by setting this property on the config like so:
#
# sg = Shotgun(site_name, script_name, script_key)
# sg.config.rpc_attempt_interval = 1000 # adjusting default interval
#
# Or by setting the ``SHOTGUN_API_RETRY_INTERVAL`` environment variable.
# In the case that the environment variable is already set, setting the
# property on the config will override it.
self.rpc_attempt_interval = 3000
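# For example (hypothetical value), the same override can be done through the
# environment variable, assuming ``os`` is imported elsewhere in this module:
#
#   os.environ["SHOTGUN_API_RETRY_INTERVAL"] = "1000"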
# From http://docs.python.org/2.6/library/httplib.html:
# If the optional timeout parameter is given, blocking operations
# (like connection attempts) will timeout after that many seconds
# (if it is not given, the global default timeout setting is used)
self.timeout_secs = None
self.api_ver = "api3"
self.convert_datetimes_to_utc = True
self._records_per_page = None
self.api_key = None
self.script_name = None
self.user_login = None
self.user_password = <PASSWORD>
self.auth_token = None
self.sudo_as_login = None
# Authentication parameters to be folded into final auth_params dict
self.extra_auth_params = None
# uuid as a string
self.session_uuid = None
self.scheme = None
self.server = None
self.api_path = None
# The raw_http_proxy reflects the exact string passed in
# to the Shotgun constructor. This can be useful if you
# need to construct a Shotgun API instance based on
# another Shotgun API instance.
self.raw_http_proxy = None
# if a proxy server is being used, the proxy_handler
# below will contain a urllib2.ProxyHandler instance
# which can be used whenever a request needs to be made.
self.proxy_handler = None
self.proxy_server = None
self.proxy_port = 8080
self.proxy_user = None
self.proxy_pass = None
self.session_token = None
self.authorization = None
self.no_ssl_validation = False
self.localized = False
def set_server_params(self, base_url):
"""
Set the different server related fields based on the passed in URL.
This will impact the following attributes:
- scheme: http or https
- api_path: usually /api3/json
- server: usually something.shotgunstudio.com
:param str base_url: The server URL.
:raises ValueError: Raised if protocol is not http or https.
"""
self.scheme, self.server, api_base, _, _ = \
urllib.parse.urlsplit(base_url)
if self.scheme not in ("http", "https"):
raise ValueError(
"base_url must use http or https got '%s'" % base_url
)
self.api_path = urllib.parse.urljoin(urllib.parse.urljoin(
api_base or "/", self.api_ver + "/"), "json"
)
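# Illustrative sketch (hypothetical URL): after
# set_server_params("https://example.shotgunstudio.com"), the config holds
# scheme == "https", server == "example.shotgunstudio.com" and
# api_path == "/api3/json".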
@property
def records_per_page(self):
"""
The records per page value from the server.
"""
if self._records_per_page is None:
# Check for api_max_entities_per_page in the server info and change the record per page
# value if it is supplied.
self._records_per_page = self._sg.server_info.get("api_max_entities_per_page") or 500
return self._records_per_page
class Shotgun(object):
"""
Shotgun Client connection.
"""
# reg ex from
# http://underground.infovark.com/2008/07/22/iso-date-validation-regex/
# Note a length check is done before checking the reg ex
_DATE_PATTERN = re.compile(
r"^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])$")
_DATE_TIME_PATTERN = re.compile(
r"^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])"
r"(\D?([01]\d|2[0-3])\D?([0-5]\d)\D?([0-5]\d)?\D?(\d{3})?)?$")
_MULTIPART_UPLOAD_CHUNK_SIZE = 20000000
def __init__(self,
base_url,
script_name=None,
api_key=None,
convert_datetimes_to_utc=True,
http_proxy=None,
ensure_ascii=True,
connect=True,
ca_certs=None,
login=None,
password=<PASSWORD>,
sudo_as_login=None,
session_token=None,
auth_token=None):
"""
Initializes a new instance of the Shotgun client.
:param str base_url: http or https url of the Shotgun server. Do not include the trailing
slash::
https://example.shotgunstudio.com
:param str script_name: name of the Script entity used to authenticate to the server.
If provided, then ``api_key`` must be as well, and neither ``login`` nor ``password``
can be provided.
.. seealso:: :ref:`authentication`
:param str api_key: API key for the provided ``script_name``. Used to authenticate to the
server. If provided, then ``script_name`` must be as well, and neither ``login`` nor
``password`` can be provided.
.. seealso:: :ref:`authentication`
:param bool convert_datetimes_to_utc: (optional) When ``True``, datetime values are converted
from local time to UTC time before being sent to the server. Datetimes received from
the server are then converted back to local time. When ``False`` the client should use
UTC date time values. Default is ``True``.
:param str http_proxy: (optional) URL for a proxy server to use for all connections. The
expected str format is ``[username:password@]111.222.333.444[:8080]``. Examples::
192.168.0.1
192.168.0.1:8888
joe:[email protected]:8888
:param bool connect: (optional) When ``True``, as soon as the :class:`~shotgun_api3.Shotgun`
instance is created, a connection will be made to the Shotgun server to determine the
server capabilities and confirm this version of the client is compatible with the server
version. This is mostly used for testing. Default is ``True``.
:param str ca_certs: (optional) path to an external SSL certificates file. By default, the
Shotgun API will use its own built-in certificates file which stores root certificates
for the most common Certificate Authorities (CAs). If you are using a corporate or
internal CA, or are packaging an application into an executable, it may be necessary to
point to your own certificates file. You can do this by passing in the full path to the
file via this parameter or by setting the environment variable ``SHOTGUN_API_CACERTS``.
In the case both are set, this parameter will take precedence.
:param str login: The user login str to use to authenticate to the server when using user-based
authentication. If provided, then ``password`` must be as well, and neither
``script_name`` nor ``api_key`` can be provided.
.. seealso:: :ref:`authentication`
:param str password: The password str to use to authenticate to the server when using user-based
authentication. If provided, then ``login`` must be as well and neither ``script_name``
nor ``api_key`` can be provided.
See :ref:`authentication` for more info.
:param str sudo_as_login: A user login string for the user whose permissions will be applied
to all actions. Event log entries will be generated showing this user performing all
actions with an additional extra meta-data parameter ``sudo_actual_user`` indicating the
script or user that is actually authenticated.
:param str session_token: The session token to use to authenticate to the server. This
can be used as an alternative to authenticating with a script user or regular user.
You can retrieve the session token by running the
:meth:`~shotgun_api3.Shotgun.get_session_token()` method.
.. todo: Add this info to the Authentication section of the docs
:param str auth_token: The authentication token required to authenticate to a server with
two-factor authentication turned on. If provided, then ``login`` and ``password`` must
be provided as well, and neither ``script_name`` nor ``api_key`` can be provided.
.. note:: These tokens can be short lived so a session is established right away if an
``auth_token`` is provided. A
:class:`~shotgun_api3.MissingTwoFactorAuthenticationFault` will be raised if the
``auth_token`` is invalid.
.. todo: Add this info to the Authentication section of the docs
.. note:: A note about proxy connections: If you are using Python <= v2.6.2, HTTPS
connections through a proxy server will not work due to a bug in the :mod:`urllib2`
library (see http://bugs.python.org/issue1424152). This will affect upload and
download-related methods in the Shotgun API (eg. :meth:`~shotgun_api3.Shotgun.upload`,
:meth:`~shotgun_api3.Shotgun.upload_thumbnail`,
:meth:`~shotgun_api3.Shotgun.upload_filmstrip_thumbnail`,
:meth:`~shotgun_api3.Shotgun.download_attachment`. Normal CRUD methods for passing JSON
data should still work fine. If you cannot upgrade your Python installation, you can see
the patch merged into Python v2.6.3 (http://hg.python.org/cpython/rev/0f57b30a152f/) and
try and hack it | |
<filename>recognition/lpr_model.py
import tensorflow as tf
from recognition.lpr_util import NUM_CHARS
def small_inception_block(x, im, om, scope='incep_block'):
# Inspired by SqueezeNet's fire module: squeeze first, then expand
with tf.variable_scope(scope):
x = conv(x,im,int(om/4),ksize=[1,1])
x = tf.nn.relu(x)
# Inspired by Inception v3
#branch1
x1 = conv(x, int(om/4), int(om/4), ksize=[1,1], layer_name='conv1')
x1 = tf.nn.relu(x1)
#branch2
x2 = conv(x, int(om/4), int(om/4), ksize=[3,1], pad='SAME', layer_name='conv2_1')
x2 = tf.nn.relu(x2)
x2 = conv(x2, int(om/4), int(om/4), ksize=[1,3], pad='SAME', layer_name='conv2_2')
x2 = tf.nn.relu(x2)
#branch3
x3 = conv(x, int(om/4), int(om/4), ksize=[5,1], pad='SAME', layer_name='conv3_1')
x3 = tf.nn.relu(x3)
x3 = conv(x3, int(om/4), int(om/4), ksize=[1,5], pad='SAME', layer_name='conv3_2')
x3 = tf.nn.relu(x3)
#branch4
x4 = conv(x, int(om/4), int(om/4), ksize=[7,1], pad='SAME', layer_name='conv4_1')
x4 = tf.nn.relu(x4)
x4 = conv(x4, int(om/4), int(om/4), ksize=[1,7], pad='SAME', layer_name='conv4_2')
x4 = tf.nn.relu(x4)
#x4 = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
x = tf.concat([x1,x2,x3,x4], 3)
x = conv(x, om, om, ksize=[1,1], layer_name='conv5')
return x
def small_basic_block(x, im, om, scope='bas_block'):
'''Small basic block for feature extraction'''
with tf.variable_scope(scope):
x = conv(x,im,int(om/4),ksize=[1,1])
x = tf.nn.relu(x)
x1 = conv(x, int(om/4), int(om/4), ksize=[3,1], pad='SAME')
x2 = conv(x, int(om/4), int(om/4), ksize=[5,1], pad='SAME')
x3 = conv(x, int(om/4), int(om/4), ksize=[7,1], pad='SAME')
x1 = tf.nn.relu(x1)
x2 = tf.nn.relu(x2)
x3 = tf.nn.relu(x3)
x1 = conv(x1, int(om/4), int(om/4), ksize=[1,3], pad='SAME')
x2 = conv(x2, int(om/4), int(om/4), ksize=[1,5], pad='SAME')
x3 = conv(x3, int(om/4), int(om/4), ksize=[1,7], pad='SAME')
x1 = tf.nn.relu(x1)
x2 = tf.nn.relu(x2)
x3 = tf.nn.relu(x3)
x = tf.concat([x1,x2,x3], 3)
x = conv(x,int(om/4*3),om,ksize=[1,1])
return x
# Depthwise separable convolution
def depth_sep_conv(x, im, om, ksize, stride=[1,1,1,1], pad='SAME', training=False, scope = 'sep_conv'):
with tf.variable_scope(scope):
conv_weights_d = tf.Variable(tf.truncated_normal([ksize[0], ksize[1], im, 1], stddev=0.1,seed=None, dtype=tf.float32))
conv_depthwise = tf.nn.depthwise_conv2d(x, conv_weights_d, strides=stride, padding=pad)
#print('conv_depthwise shape is :', conv_depthwise.get_shape().as_list())
conv_depthwise = tf.layers.batch_normalization(conv_depthwise, training=training)
conv_depthwise = tf.nn.relu(conv_depthwise)
conv_weights_s = tf.Variable(tf.truncated_normal([1, 1, im, om], stddev=0.1,seed=None, dtype=tf.float32))
conv_biases = tf.Variable(tf.zeros([om], dtype=tf.float32))
conv_sep = tf.nn.conv2d(conv_depthwise, conv_weights_s, strides=stride, padding=pad)
out = tf.nn.bias_add(conv_sep, conv_biases)
out = tf.layers.batch_normalization(out, training=training)
out = tf.nn.relu(out)
#print("out:", out.get_shape().as_list())
return out
def conv(x,im,om,ksize,stride=[1,1,1,1], pad='SAME', layer_name='conv'):
with tf.variable_scope(layer_name):
conv_weights = tf.Variable(
tf.truncated_normal([ksize[0], ksize[1], im, om], stddev=0.1,
seed=None, dtype=tf.float32), name='weight')
conv_biases = tf.Variable(tf.zeros([om], dtype=tf.float32), name='biase')
out = tf.nn.conv2d(x, conv_weights, strides=stride, padding=pad)
relu = tf.nn.bias_add(out, conv_biases)
return relu
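# Illustrative sketch (hypothetical shapes): applying conv() to a batch of
# 94x24 RGB plate images with a 3x3 kernel and 64 output filters.
#
#   x = tf.placeholder(tf.float32, [8, 94, 24, 3])
#   y = conv(x, 3, 64, ksize=[3, 3])   # y has shape [8, 94, 24, 64]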
# 893,128 parameters
def get_train_model(num_channels, batch_size, img_size, training=False):
inputs = tf.placeholder(tf.float32, shape=(batch_size, img_size[0], img_size[1], num_channels))
# Sparse placeholder required by ctc_loss
targets = tf.sparse_placeholder(tf.int32)
# 1-D vector of sequence lengths, shape [batch_size,]
seq_len = tf.placeholder(tf.int32, [None])
# Input: 94*24*3
x = inputs
# Kernel: 3*3*3*64, output: 94*24*64
x = conv(x,num_channels,64,ksize=[3,3])
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x,
ksize=[1, 3, 3, 1],
strides=[1, 1, 1, 1],
padding='SAME')
# Output: 94*24*64
x = small_basic_block(x,64,64)
x2=x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 47*24*64
x = tf.nn.max_pool(x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 1, 1],
padding='SAME')
# Output: 47*24*256
x = small_basic_block(x, 64,256)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 47*24*256
x = small_basic_block(x, 256, 256)
x3 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 24*24*256
x = tf.nn.max_pool(x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 1, 1],
padding='SAME')
x = tf.layers.dropout(x)
# Kernel: 4*1*256*256, output: 24*24*256
x = conv(x, 256, 256, ksize=[4, 1])
# tf.layers.dropout defaults to a drop rate of 0.5
x = tf.layers.dropout(x)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Kernel: 1*13*256*67, output: 24*24*67
x = conv(x,256,NUM_CHARS+1,ksize=[1,13],pad='SAME')
x = tf.nn.relu(x)
cx = tf.reduce_mean(tf.square(x))
x = tf.div(x,cx)
# Pooling: input 94*24*3, output x1 = 24*24*3
x1 = tf.nn.avg_pool(inputs,
ksize=[1, 4, 1, 1],
strides=[1, 4, 1, 1],
padding='SAME')
cx1 = tf.reduce_mean(tf.square(x1))
x1 = tf.div(x1, cx1)
# Pooling: input 94*24*64, output x2 = 24*24*64
x2 = tf.nn.avg_pool(x2,
ksize=[1, 4, 1, 1],
strides=[1, 4, 1, 1],
padding='SAME')
cx2 = tf.reduce_mean(tf.square(x2))
x2 = tf.div(x2, cx2)
# Pooling: input 47*24*256, output x3 = 24*24*256
x3 = tf.nn.avg_pool(x3,
ksize=[1, 2, 1, 1],
strides=[1, 2, 1, 1],
padding='SAME')
cx3 = tf.reduce_mean(tf.square(x3))
x3 = tf.div(x3, cx3)
# Channel concat: input 24*24*(67+3+64+256), output 24*24*390
x = tf.concat([x,x1,x2,x3],3)
# Kernel: 1*1*390*67, output: 24*24*67
x = conv(x, x.get_shape().as_list()[3], NUM_CHARS + 1, ksize=(1, 1))
# Reduce over width: input b*24*24*67, output b*24*67
logits = tf.reduce_mean(x, axis=2)
# Returns: logits (b*24*67), inputs (b*94*24*3), targets (1), seq_len (n)
return logits, inputs, targets, seq_len
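# Illustrative sketch (hypothetical sizes): building the recognition graph for
# a batch of 8 plate images of size 94x24x3; the logits can then be fed to a
# CTC loss (transposed to time-major form first if the loss requires it).
#
#   logits, inputs, targets, seq_len = get_train_model(3, 8, (94, 24))
#   # logits has shape [8, 24, NUM_CHARS + 1]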
# 981,336 parameters
def get_train_model_new(num_channels, batch_size, img_size, training=False):
inputs = tf.placeholder(tf.float32, shape=(batch_size, img_size[0], img_size[1], num_channels))
# Sparse placeholder required by ctc_loss
targets = tf.sparse_placeholder(tf.int32)
# 1-D vector of sequence lengths, shape [batch_size,]
seq_len = tf.placeholder(tf.int32, [None])
# Input: 94*24*3
x = inputs
# Kernel: 3*3*3*64, output: 94*24*64
x = conv(x,num_channels,64,ksize=[3,3])
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x,
ksize=[1, 3, 3, 1],
strides=[1, 1, 1, 1],
padding='SAME')
# Output: 94*24*128
x = small_inception_block(x, 64, 128)
x2 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 47*24*128
x = tf.nn.max_pool(x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 1, 1],
padding='SAME')
# Output: 47*24*256
x = small_inception_block(x, 128, 256)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 47*24*256
x = small_inception_block(x, 256, 256)
x3 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 24*24*256
x = tf.nn.max_pool(x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 1, 1],
padding='SAME')
x = tf.layers.dropout(x)
# Kernel: 4*1*256*256, output: 24*24*256
x = conv(x, 256, 256, ksize=[4, 1])
# tf.layers.dropout defaults to a drop rate of 0.5
x = tf.layers.dropout(x)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Kernel: 1*13*256*67, output: 24*24*67
x = conv(x,256,NUM_CHARS+1,ksize=[1,13],pad='SAME')
x = tf.nn.relu(x)
cx = tf.reduce_mean(tf.square(x))
x = tf.div(x,cx)
# Pooling: input 94*24*3, output x1 = 24*24*3
x1 = tf.nn.avg_pool(inputs,
ksize=[1, 4, 1, 1],
strides=[1, 4, 1, 1],
padding='SAME')
cx1 = tf.reduce_mean(tf.square(x1))
x1 = tf.div(x1, cx1)
# Pooling: input 94*24*128, output x2 = 24*24*128
x2 = tf.nn.avg_pool(x2,
ksize=[1, 4, 1, 1],
strides=[1, 4, 1, 1],
padding='SAME')
cx2 = tf.reduce_mean(tf.square(x2))
x2 = tf.div(x2, cx2)
# Pooling: input 47*24*256, output x3 = 24*24*256
x3 = tf.nn.avg_pool(x3,
ksize=[1, 2, 1, 1],
strides=[1, 2, 1, 1],
padding='SAME')
cx3 = tf.reduce_mean(tf.square(x3))
x3 = tf.div(x3, cx3)
# Channel concat: input 24*24*(67+3+128+256), output 24*24*454
x = tf.concat([x,x1,x2,x3],3)
# Kernel: 1*1*454*67, output: 24*24*67
x = conv(x, x.get_shape().as_list()[3], NUM_CHARS + 1, ksize=(1, 1))
# Reduce over width: input b*24*24*67, output b*24*67
logits = tf.reduce_mean(x, axis=2)
# Returns: logits (b*24*67), inputs (b*94*24*3), targets (1), seq_len (n)
return logits, inputs, targets, seq_len
# 1,107,417 parameters
def get_train_model_multitask(inputs, num_channels, batch_size, img_size, training=False):
# Input: 96*36*3
x = inputs
# Kernel: 3*3*3*64, output: 96*36*64
x = conv(x,num_channels,64,ksize=[3,3])
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
# Output: 96*36*128
x = small_inception_block(x, 64, 128)
x2 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 48*36*128
x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 1, 1], padding='SAME')
# Output: 48*36*256
x = small_inception_block(x, 128, 256)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 48*36*256
x = small_inception_block(x, 256, 256)
x3 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 24*36*256
x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 1, 1], padding='SAME')
x = tf.layers.dropout(x)
x_classify = x
# Output: 24*36*32
x_classify = conv(x_classify, 256, 32, ksize=[1, 1])
# Output: 12*12*32
x_classify = tf.nn.max_pool(x_classify, ksize=[1, 3, 3, 1], strides = [1, 2, 3, 1], padding='SAME')
# Output: 10*10*32
x_classify = conv(x_classify, 32, 32, ksize=[3, 3], stride=[1,1,1,1], pad='VALID')
# Output: 5*5*32
x_classify = tf.nn.max_pool(x_classify, ksize=[1, 3, 3, 1], strides = [1, 2, 2, 1], padding='SAME')
# Output: 800 (flattened)
cl_shape = x_classify.get_shape().as_list()
#nodes = cl_shape[1]*cl_shape[2]*cl_shape[3]
x_classify = tf.reshape(x_classify, [-1, cl_shape[1]*cl_shape[2]*cl_shape[3]])
dense = tf.layers.dense(inputs=x_classify, units=128, activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
dense = tf.layers.dense(inputs=dense, units=32, activation=tf.nn.relu, kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
logits_classify = tf.layers.dense(inputs=dense, units=1, activation=tf.nn.sigmoid)
# Kernel: 4*1*256*256, output: 24*36*256
x = conv(x, 256, 256, ksize=[4, 1])
# tf.layers.dropout defaults to a drop rate of 0.5
x = tf.layers.dropout(x)
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Kernel: 1*13*256*67, output: 24*36*67
x = conv(x,256,NUM_CHARS+1,ksize=[1,13],pad='SAME')
x = tf.nn.relu(x)
cx = tf.reduce_mean(tf.square(x))
x = tf.div(x,cx)
# Pooling: input 96*36*3, output x1 = 24*36*3
x1 = tf.nn.avg_pool(inputs, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='SAME')
cx1 = tf.reduce_mean(tf.square(x1))
x1 = tf.div(x1, cx1)
# Pooling: input 96*36*128, output x2 = 24*36*128
x2 = tf.nn.avg_pool(x2, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='SAME')
cx2 = tf.reduce_mean(tf.square(x2))
x2 = tf.div(x2, cx2)
# Pooling: input 48*36*256, output x3 = 24*36*256
x3 = tf.nn.avg_pool(x3, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
cx3 = tf.reduce_mean(tf.square(x3))
x3 = tf.div(x3, cx3)
# Channel concat: input 24*36*(67+3+128+256), output 24*36*454
x = tf.concat([x,x1,x2,x3],3)
# Kernel: 1*1*454*67, output: 24*36*67
x = conv(x, x.get_shape().as_list()[3], NUM_CHARS + 1, ksize=(1, 1))
x_shape = x.get_shape().as_list()
x_up = tf.slice(x, [0, 0, 0, 0], [x_shape[0], x_shape[1], int(x_shape[2]/3), x_shape[3]])
x_down = tf.slice(x, [0, 0, int(x_shape[2]/3), 0], [x_shape[0], x_shape[1], int(x_shape[2]/3*2), x_shape[3]])
# Reduce over width: input b*24*36*67, output b*24*67
logits = tf.reduce_mean(x, axis=2)
logits_up = tf.reduce_mean(x_up, axis=2)
logits_down = tf.reduce_mean(x_down, axis=2)
# Returns: logits (b*24*67), logits_up, logits_down, logits_classify
return logits, logits_up, logits_down, logits_classify
# 1,168,387 parameters
def get_train_model_multitask_v2(inputs, num_channels, batch_size, img_size, training=False):
# Input: 96*36*3
x = inputs
# Kernel: 3*3*3*64, output: 96*36*64
x = conv(x,num_channels,64,ksize=[3,3], layer_name='conv1')
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME')
# Output: 96*36*128
x = small_inception_block(x, 64, 128, scope='incep_block1')
x2 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 48*36*128
x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 1, 1], padding='SAME')
# Output: 48*36*256
x = small_inception_block(x, 128, 256, scope='incep_block2')
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 48*36*256
x = small_inception_block(x, 256, 256, scope='incep_block3')
x3 = x
x = tf.layers.batch_normalization(x, training=training)
x = tf.nn.relu(x)
# Output: 24*36*256
x = tf.nn.max_pool(x, ksize=[1, 3, 3, 1], strides=[1, 2, 1, 1], padding='SAME')
x = tf.layers.dropout(inputs=x, rate=0.3, training=training)
with | |
# -*- coding: utf-8 -*-
"""
Feb 2018
ADAPTCAST
FORECAST PACKAGE
@author: <EMAIL>
"""
#Load utility libraries
# =============================================================================
import array
import random
#Data utilities
#import pandas as pd
import numpy as np
#To plot
# =============================================================================
# from matplotlib import pyplot
# =============================================================================
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.colors as colors
# from pandas.tools.plotting import autocorrelation_plot
# ModeL OPERATOR
#from sklearn.linear_model import LinearRegression
#from sklearn.metrics import mean_squared_error
#from math import sqrt
#GA optimization
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
# =============================================================================
#CONSTRUCTOR
class AdaptativeOperator:
def __init__(self,lagConf=[],window=100,forecasts=100,delta=1):
#Define the attributes (parameters of the model)
self.lagConf=lagConf
self.window=window
self.forecasts=forecasts
self.delta=delta #Just 1 for now
# A workaround: store these as attributes so the GA objective function can read them
self.maxLag=[]
self.gene_length=[]
self.windowRange=[]
self.targetData=[]
self.inputData=[]
self.test_length=[]
# =============================================================================
# ====== FUNCTIONS=======================================================
# =============================================================================
# Create a multiple lagged state space of the shape
# y(t) = f(y(t-1), y(t-2), x(t), x(t-1), x(t-2))
def shift(self, data,k):
x=np.zeros(k, dtype=float)
x.fill(np.nan)
return np.hstack((x,data))[0:len(data)].T
def createLags(self, inputData, lagConfiguration):
fistCol=1
laggedData=np.zeros(1, dtype=float)
laggedData.fill(np.nan)
for i in range(0, lagConfiguration.shape[0]):
lags=lagConfiguration[i]
if lags!=0:
for j in range(1, lags+1):
#names.append(list(data)[i]+" "+str(j))
if fistCol==1:
#laggedData=inputData.iloc[:,i].shift(j).dropna()
laggedData=self.shift(inputData[:,i],j).reshape(-1,1)
fistCol=0
else:
#laggedData=pd.concat([laggedData,inputData.iloc[:,i].shift(j)],axis=1).dropna()
laggedData=np.append(laggedData,self.shift(inputData[:,i],j).reshape(-1,1), axis=1)
#laggedData.columns=names
return laggedData
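# Illustrative sketch (made-up numbers): with lagConfiguration = [2, 1] the
# lagged matrix holds y(t-1), y(t-2) and x(t-1) as columns, with the leading
# rows padded by NaN.
#
#   op = AdaptativeOperator()
#   data = np.column_stack((np.arange(5.0), np.arange(10.0, 15.0)))
#   lagged = op.createLags(data, np.array([2, 1]))   # shape (5, 3)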
def createLagsOut(self, inputData, lagConfiguration):
fistCol=1
laggedData=np.zeros(1, dtype=float)
laggedData.fill(np.nan)
for i in range(0, lagConfiguration.shape[0]):
lags=lagConfiguration[i]
if lags!=0:
for j in range(0, lags):
#names.append(list(data)[i]+" "+str(j))
if fistCol==1:
#laggedData=inputData.iloc[:,i].shift(j).dropna()
laggedData=self.shift(inputData[:,i],j).reshape(-1,1)
fistCol=0
else:
#laggedData=pd.concat([laggedData,inputData.iloc[:,i].shift(j)],axis=1).dropna()
laggedData=np.append(laggedData,self.shift(inputData[:,i],j).reshape(-1,1), axis=1)
#laggedData.columns=names
return laggedData
#Calculate the Root mean squared error
def rmse(self, target,simdata):
#Length of the data
n = len(target)
#Number of NAs in the data
nans=np.isnan(target) | np.isnan(simdata)
nanNumb=sum(nans)
# RMSE
cost = np.sqrt(sum(np.power((target[nans==0] - simdata[nans==0]),2)/(n-nanNumb)))
return cost
#Calculate the Normalized Root mean squared error
def nrmse(self, target,simdata):
#Length of the data
n = len(target)
#Number of NAs in the data
nans=np.isnan(target) | np.isnan(simdata)
nanNumb=sum(nans)
# NRMSE
cost = np.sqrt(sum(np.power((target[nans==0] - simdata[nans==0]),2)/(n-nanNumb)))
cost=cost/np.mean(target[nans==0])
return cost
#Calculate the percentage root mean squared error
def prmse(self, target,simdata):
#Length of the data
n = len(target)
#Number of NAs in the data
nans=np.isnan(target) | np.isnan(simdata)
nanNumb=sum(nans)
# PRMSE
cost = np.sqrt(sum(np.power((target[nans==0] - simdata[nans==0]),2)/(n-nanNumb)))
cost=cost/target[nans==0]
return cost
#Calculate Nash–Sutcliffe model efficiency coefficient
def nash(self, target,simdata):
#Length of the data
n = len(target)
#Number of NAs in the data
nans=np.isnan(target) | np.isnan(simdata)
nanNumb=sum(nans)
# Nash–Sutcliffe efficiency: 1 - SSE / variance of the observations
sse = np.sum(np.power((target[nans==0] - simdata[nans==0]),2))
var=np.sum(np.power((target[nans==0]-np.mean(target[nans==0])),2))
cost=1-(sse/var)
return cost
#Calculate the Russian hydrological criterion S/sigma_delta
def ssigmadelta(self, target,simdata,delta):
# Estimate the delta
i=np.arange(1,len(target) - delta)
deltas= target[i+delta] - target[i]
# Mean of the deltas
md = np.nanmean(deltas)
# SD of the deltas
sigmadelta = np.nanstd(deltas - md)
# RMSE
s=self.rmse(target,simdata)
#S/sigmadelta
ssd = s/sigmadelta
cost = ssd
return cost
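# Written out: S/sigma_delta = RMSE(target, simdata) / std(delta_i - mean(delta_i)),
# where delta_i = target[i + delta] - target[i]. Values below 1 mean the forecast
# error is smaller than the natural variability of the delta-step change.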
#Fast linear regression method
def lmFast(self, y,x):
# add col of 1s
X = np.concatenate((np.ones((len(x),1), dtype=int), x), axis=1)
Y = np.array(y).reshape(-1, 1)
# take the ones with no nan value
indexNA=(np.sum(np.isnan(x),axis=1)!=0).reshape(-1, 1) | (np.isnan(y)).reshape(-1, 1)
indexNA=indexNA.reshape(-1)
X=X[indexNA==0,:]
Y=Y[indexNA==0]
coef = np.linalg.solve(X.T.dot(X), X.T.dot(Y))
# coef = np.linalg.lstsq(X, Y)[0]
# print("lm_ok : ",np.allclose(np.dot(X.T.dot(X), coef), X.T.dot(Y)))
return coef
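# Illustrative sketch (made-up data): fitting y = 1 + 2*x through the normal
# equations; coef holds the intercept and the slope.
#
#   op = AdaptativeOperator()
#   x = np.arange(10.0).reshape(-1, 1)
#   y = 1.0 + 2.0 * x.reshape(-1)
#   coef = op.lmFast(y, x)               # approximately [[1.0], [2.0]]
#   yhat = op.predictlmFast(coef, x)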
#Predict with Fast linear regression method
def predictlmFast(self, w,x):
X = np.concatenate((np.ones((len(x),1), dtype=int), x), axis=1)
Y=w.T.dot(X.T)
return Y.reshape(-1)
# plot Performance
def plotPerformance(self, target,prediction,delta):
res=target-prediction
text1= "RMSE = " + np.array2string(np.round(self.rmse(target,prediction), decimals= 4)) + " "+"NRMSE = "+ np.array2string(np.round(self.nrmse(target,prediction), decimals = 4)) + " "+"NASH = "+ np.array2string(np.round(self.nash(target,prediction), decimals = 4)) + " "+"S/Sd = "+ np.array2string(np.round(self.ssigmadelta(target,prediction,delta), decimals = 4))
indOK=~np.isnan(target) & ~np.isnan(prediction)
indOK=indOK.reshape(-1)
targetOK=target.reshape(-1)[indOK]
predictionOK=prediction.reshape(-1)[indOK]
text2= "r = " + np.array2string(np.round(np.corrcoef(targetOK,predictionOK)[0,1], decimals= 4))
# plot it
t = np.arange(1, len(target)+1).reshape(-1,1)
fig = plt.figure(figsize=(20, 10))
gs = gridspec.GridSpec(2, 2, width_ratios=[3, 1])
plt.rcParams.update({'font.size': 22})
ax0 = plt.subplot(gs[0])
ax0.plot(t,target,marker=".",label='Target')
ax0.plot(t,prediction,"--",marker=".",color="#B8059A",label='Simulation')
ax0.scatter(t[t.shape[0]-1],prediction[prediction.shape[0]-1],marker="*",color="#B8059A",s=100,label='Forecast')
ax0.set_title(text1)
ax0.set_xlim([1,np.nanmax(t)+delta])
# ax0.set_ylim([np.nanmin(target),np.nanmax(target)])
ax0.legend()
ax1 = plt.subplot(gs[1])
ax1.scatter(target,
prediction,s=10,
color="black",marker="o")
ax1.set_title(text2)
ax1.set_xlim([np.nanmin(target),np.nanmax(target)])
ax1.set_ylim([np.nanmin(target),np.nanmax(target)])
# The regresion fit
fit=self.lmFast(target.reshape(-1, 1),prediction.reshape(-1, 1))
y1=self.predictlmFast(fit,target.reshape(-1, 1))
ax1.plot(target,y1,"--",color="#8B008B")
ax2 = plt.subplot(gs[2])
#ax2.stem(t,res,color="#05B851",markerfmt=" ")
ax2.stem(res, markerfmt=' ')
ax2.set_title("Residuals")
ax2.set_xlim([1,np.nanmax(t)])
ax2.set_ylim([np.nanmin(res),np.nanmax(res)])
ax3 = plt.subplot(gs[3])
n_bins=np.round(np.log2(res.shape[0])+1).astype(int)
# n_bins=14
# N is the count in each bin, bins is the lower-limit of the bin
N, bins, patches = ax3.hist(res.reshape(-1)[indOK], bins=n_bins)
# We'll color code by height, but you could use any scalar
fracs = N.astype(float) / N.max()
# we need to normalize the data to 0..1 for the full range of the colormap
norm = colors.Normalize(fracs.min(), fracs.max())
# Now, we'll loop through our objects and set the color of each accordingly
for thisfrac, thispatch in zip(fracs, patches):
#color = plt.cm.viridis(norm(thisfrac))
color = plt.cm.viridis(norm(thisfrac)*(2/3))
thispatch.set_facecolor(color)
ax3.set_title("Hist Residuals")
plt.tight_layout()
return plt
# Interpolate a dataset using a linear model of
# all variables in the set
def interpolate(self, data):
# Create a copy to interpolate
InterpData=data
for i in np.arange(0,data.shape[1]):
# index of missing values
indexNA=np.isnan(data[:,i])
if np.sum(indexNA)!=0:
# Create a liner model
y_ok=data[indexNA==0,i]
x=np.delete(data, np.s_[i], axis=1)
x_ok=x[indexNA==0,:]
x_nan=x[indexNA==1,:]
fit1=self.lmFast(y_ok,x_ok)
# interpolate
tInerp1=self.predictlmFast(fit1, x_nan)
# update the df with the interpolated values
InterpData[indexNA,i]=tInerp1
tInerp2=InterpData[:,i]
# Fill any remaining values the linear model could not interpolate
if np.sum(np.isnan(tInerp2))!=0:
indexNA=np.isnan(tInerp2)
index=np.arange(0,tInerp2.shape[0])
tInerp2[indexNA] =np.interp(index[indexNA==1],index[indexNA==0], tInerp2[indexNA==0])
# update the df with the interpolated values
InterpData[:,i]=tInerp2
return InterpData
#Apply the adaptive operator to produce forecasts
def predict(self, targetData,inputData,lagConf,window,delta,numberCrossVal):
# Parameters
# =============================================================================
# inputData=input
# targetData=target
# =============================================================================
lagConfiguration=lagConf
windowSize=window
numberOfForecasts=numberCrossVal
# target+input data
# (it's not cheating because it uses only the lags)
allData=np.append(targetData,inputData,axis=1)
inputLagedData=self.createLags(allData,lagConfiguration)
# trim the imput if the delta is > 1
if delta>1:
inputLagedData=inputLagedData[:inputLagedData.shape[0]-(delta-1)]
targetData=targetData[(delta-1):]
# target+laged data
fullData=np.concatenate((targetData.reshape(-1,1),inputLagedData.reshape(-1,np.nansum(lagConfiguration))), axis=1)
# Get the length of the data
dataLength=len(fullData)
# Get the number of predictors
numberOfLagedVaribles=inputLagedData.shape[1]
# Check that the number of observations in
# the calibration window is at least the
# number of predictor variables plus two
if numberOfLagedVaribles+1>windowSize:
#You need a bigger calibration window
windowSize=numberOfLagedVaribles+2 #At least 2 degrees of freedom
# Extract the validation data set
#validationData=targetData[seq(dataLength-numberOfForecasts+1,dataLength)]
validationData=targetData[np.arange(dataLength-numberOfForecasts,dataLength)].reshape(-1,1)
# Create a vector to store the simulations
simulatedData=np.zeros((numberOfForecasts,1), dtype=float)
simulatedData.fill(np.nan)
# Realize the forecast for each calibration window
for j in range(0,numberOfForecasts):
# select the window for the specific forecast
windowIndex=np.arange(dataLength-numberOfForecasts+1+j-windowSize-1,
dataLength-numberOfForecasts+1+j)
# Calibration set for the window -1 index
train=fullData[windowIndex[np.arange(0,len(windowIndex)-1)],:]
test=fullData[windowIndex[len(windowIndex)-1],:].reshape(1,-1)
y=train[:,0].reshape(-1,1)
x=train[:,range(1,train.shape[1])]
# Create a liner model
# =============================================================================
fit=self.lmFast(y,x)
# =============================================================================
# forecast one index outside of the calibration window set
# =============================================================================
simulatedData[j,0]=self.predictlmFast(fit,test[:,range(1,train.shape[1])])
# =============================================================================
#FORECAST OUTSIDE THE DATA SET
Xout=self.createLagsOut(allData, lagConfiguration)
#Trim it, just to get the last value
Xout=Xout[Xout.shape[0]-1,:].reshape(1,-1)
# Predict using latest data
forecast=self.predictlmFast(fit,Xout)
# Wrap it with nans for the indexes of the forecast
nans=np.zeros(delta)
nans[:]=np.nan
#Target+nans for the delta
validationData=np.append(validationData,nans.reshape(-1,1)).reshape(-1,1)
#nans for the Target+delta
simulatedData=np.append(simulatedData,nans).reshape(-1,1)
#The last value is the forecast
simulatedData[simulatedData.shape[0]-1]=forecast
return validationData,simulatedData
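# Illustrative sketch (made-up data): one-step-ahead forecasts over the last
# 10 observations, using two lags of the target and two lags of one input.
#
#   op = AdaptativeOperator()
#   target = np.sin(np.arange(200.0)).reshape(-1, 1)
#   inputs = np.cos(np.arange(200.0)).reshape(-1, 1)
#   obs, sim = op.predict(target, inputs, np.array([2, 2]), 60, 1, 10)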
# =============================================================================
#GA OPTIMIZATION FUNCTIONS
# =============================================================================
def initIndividual(self,icls, content):
return icls(content)
def initPopulation(self,pcls, ind_init,maxLag,popSize,gene_length):
# the lag configurations as single variables
singleVariables=np.identity(gene_length-1).astype(int)
for d in range(2,maxLag):
singleVariables=np.concatenate((singleVariables,d*np.identity(gene_length-1).astype(int)),axis=0)
# Add single lags with the average window
contents = np.concatenate(((np.round(maxLag/2)*np.ones(singleVariables.shape[0])).astype(int).reshape(-1,1),
singleVariables),axis=1).astype(int)
# the rest just random
randomPortion=popSize-contents.shape[0]
if randomPortion>0:
contents = np.concatenate((contents,
np.random.randint(0, high=maxLag, size=(randomPortion,contents.shape[1]))),
axis=0).astype(int)
return pcls(ind_init(c) for c in contents)
def decode(self, chromosome):
# Decode GA solution to windowSize and lagConf
windowSize = self.windowRange[chromosome[0]]
lagConf = np.array( chromosome[1:], dtype=np.int32)
return windowSize,lagConf
def objectiveFunction(self,chromosome):
# Decode GA solution to windowSize and lagConf
#windowSize,lagConf=decode(chromosome)
windowSize = self.windowRange[chromosome[0]]
lagConf = np.array( chromosome[1:], dtype=np.int32)
# Return a fitness score of 99999 if no lags are selected
if np.sum(lagConf) == 0:
cost=99999
# print('Validation s/sd: ',cost,'\n')
return cost,
else:
target,forecast=self.predict(self.targetData,self.inputData,
lagConf,
windowSize,self.delta,
self.test_length)
cost=self.ssigmadelta(target,forecast,self.delta)
if np.isnan(cost) or cost==0:
bizarro=666
return cost,
#Fit adaptative operator for forecast
def fit(self, targetData,inputData,maxLag,windowLimits,delta,numberCrossVal,ngen,popSize):
#Get some parameters
test_length=numberCrossVal
minWindow=windowLimits[0]
maxWindow=windowLimits[1]
# =============================================================================
# SET OPTIMIZATION PARAMETERS
# =============================================================================
# A tricky way to pass these to the
path):
if self.isconst:
return self.value()
vals = []
for kid in self.kids:
vals.append(await tostr(await kid.compute(path)))
return '.'.join(vals)
class TagMatch(TagName):
'''
Like TagName, but can have asterisks
'''
def hasglob(self):
assert self.kids
return any('*' in kid.valu for kid in self.kids if isinstance(kid, Const))
class Cmpr(Value):
pass
class Const(Value):
pass
class Bool(Const):
pass
class List(Value):
def repr(self):
return 'List: %s' % self.kids
async def runtval(self, runt):
return [await k.runtval(runt) for k in self.kids]
async def compute(self, path):
return [await k.compute(path) for k in self.kids]
def value(self):
return [k.value() for k in self.kids]
class RelProp(RunValue):
def __init__(self, kids=()):
RunValue.__init__(self, kids=kids)
assert len(kids) == 1
kid = kids[0]
if isinstance(kid, Const):
self.isconst = True
valu = kid.value()
self.valu = valu[1:]
return
assert isinstance(kid, VarValue)
self.isconst = False
self.valu = s_common.novalu
def value(self):
assert self.isconst
return self.valu
async def runtval(self, runt):
if self.isconst:
return self.value()
return await self.kids[0].runtval(runt)
class UnivProp(RelProp):
async def runtval(self, runt):
if self.isconst:
return self.value()
return '.' + await self.kids[0].runtval(runt)
def value(self):
assert self.isconst
return '.' + self.valu
class AbsProp(Value):
pass
class Edit(Oper):
pass
class EditParens(Edit):
async def run(self, runt, genr):
nodeadd = self.kids[0]
assert isinstance(nodeadd, EditNodeAdd)
formname = nodeadd.kids[0].value()
runt.layerConfirm(('node', 'add', formname))
# create an isolated generator for the add vs edit
if nodeadd.isruntsafe(runt):
# Luke, let the (node,path) tuples flow through you
async for item in genr:
yield item
# isolated runtime stack...
genr = s_common.agen()
for oper in self.kids:
genr = oper.run(runt, genr)
async for item in genr:
yield item
else:
# do a little genr-jig.
async for node, path in genr:
yield node, path
async def editgenr():
async for item in nodeadd.addFromPath(path):
yield item
fullgenr = editgenr()
for oper in self.kids[1:]:
fullgenr = oper.run(runt, fullgenr)
async for item in fullgenr:
yield item
class EditNodeAdd(Edit):
def prepare(self):
oper = self.kids[1].value()
self.name = self.kids[0].value()
self.form = self.core.model.form(self.name)
if self.form is None:
raise s_exc.NoSuchForm(name=self.name)
self.excignore = (s_exc.BadTypeValu,) if oper == '?=' else ()
def isruntsafe(self, runt):
return self.kids[2].isRuntSafe(runt)
async def addFromPath(self, path):
'''
Add a node using the context from path.
NOTE: CALLER MUST CHECK PERMS
'''
vals = await self.kids[2].compute(path)
# for now, we have a conflict with a Node instance and prims
# if not isinstance(vals, s_stormtypes.Node):
# vals = await s_stormtypes.toprim(vals)
for valu in self.form.type.getTypeVals(vals):
try:
newn = await path.runt.snap.addNode(self.name, valu)
except self.excignore:
pass
else:
yield newn, path.runt.initPath(newn)
async def run(self, runt, genr):
# the behavior here is a bit complicated...
# single value add (runtime computed per node )
# In the cases below, $hehe is input to the storm runtime vars.
# case 1: [ foo:bar="lols" ]
# case 2: [ foo:bar=$hehe ]
# case 3: [ foo:bar=$lib.func(20, $hehe) ]
# case 4: ($foo, $bar) = $hehe [ foo:bar=($foo, $bar) ]
# iterative add ( node add is executed once per inbound node )
# case 1: <query> [ foo:bar=(:baz, 20) ]
# case 2: <query> [ foo:bar=($node, 20) ]
# case 3: <query> $blah=:baz [ foo:bar=($blah, 20) ]
runtsafe = self.isruntsafe(runt)
async def feedfunc():
if not runtsafe:
first = True
async for node, path in genr:
# must reach back first to trigger sudo / etc
if first:
runt.layerConfirm(('node', 'add', self.name))
first = False
# must use/resolve all variables from path before yield
async for item in self.addFromPath(path):
yield item
yield node, path
await asyncio.sleep(0)
else:
runt.layerConfirm(('node', 'add', self.name))
valu = await self.kids[2].runtval(runt)
valu = await s_stormtypes.toprim(valu)
for valu in self.form.type.getTypeVals(valu):
try:
node = await runt.snap.addNode(self.name, valu)
except self.excignore:
continue
yield node, runt.initPath(node)
await asyncio.sleep(0)
if runtsafe:
async for node, path in genr:
yield node, path
async for item in s_base.schedGenr(feedfunc()):
yield item
class EditPropSet(Edit):
async def run(self, runt, genr):
oper = self.kids[1].value()
excignore = (s_exc.BadTypeValu,) if oper in ('?=', '?+=', '?-=') else ()
isadd = oper in ('+=', '?+=')
issub = oper in ('-=', '?-=')
async for node, path in genr:
name = await self.kids[0].compute(path)
valu = await self.kids[2].compute(path)
valu = await s_stormtypes.toprim(valu)
prop = node.form.props.get(name)
if prop is None:
raise s_exc.NoSuchProp(name=name, form=node.form.name)
if not node.form.isrunt:
# runt node property permissions are enforced by the callback
runt.layerConfirm(('node', 'prop', 'set', prop.full))
try:
if isadd or issub:
if not isinstance(prop.type, s_types.Array):
mesg = f'Property set using ({oper}) is only valid on arrays.'
raise s_exc.StormRuntimeError(mesg)
arry = node.get(name)
if arry is None:
arry = ()
if isadd:
# this new valu will get normed by the array prop
valu = arry + (valu,)
else:
# make arry mutable
arry = list(arry)
# we cant remove something we cant norm...
# but that also means it can't be in the array so...
norm, info = prop.type.arraytype.norm(valu)
try:
arry.remove(norm)
except ValueError:
pass
valu = arry
await node.set(name, valu)
except excignore:
pass
yield node, path
await asyncio.sleep(0)
class EditPropDel(Edit):
async def run(self, runt, genr):
async for node, path in genr:
name = await self.kids[0].compute(path)
prop = node.form.props.get(name)
if prop is None:
raise s_exc.NoSuchProp(name=name, form=node.form.name)
runt.layerConfirm(('node', 'prop', 'del', prop.full))
await node.pop(name)
yield node, path
await asyncio.sleep(0)
class EditUnivDel(Edit):
async def run(self, runt, genr):
univprop = self.kids[0]
assert isinstance(univprop, UnivProp)
if univprop.isconst:
name = self.kids[0].value()
univ = runt.model.props.get(name)
if univ is None:
raise s_exc.NoSuchProp(name=name)
async for node, path in genr:
if not univprop.isconst:
name = await univprop.compute(path)
univ = runt.model.props.get(name)
if univ is None:
raise s_exc.NoSuchProp(name=name)
runt.layerConfirm(('node', 'prop', 'del', name))
await node.pop(name)
yield node, path
await asyncio.sleep(0)
class N1Walk(Oper):
async def walkNodeEdges(self, runt, node, verb=None):
async for _, iden in node.iterEdgesN1(verb=verb):
buid = s_common.uhex(iden)
walknode = await runt.snap.getNodeByBuid(buid)
if walknode is not None:
yield walknode
async def run(self, runt, genr):
@s_cache.memoize(size=100)
def isDestForm(formname, destforms):
if not isinstance(destforms, tuple):
destforms = (destforms, )
for destform in destforms:
if not isinstance(destform, str):
mesg = f'walk operation expected a string or list for dest. got: {destform!r}'
raise s_exc.StormRuntimeError(mesg=mesg)
if destform == '*':
return True
if formname == destform:
return True
return False
async for node, path in genr:
verb = await self.kids[0].compute(path)
verb = await s_stormtypes.toprim(verb)
dest = await self.kids[1].compute(path)
dest = await s_stormtypes.toprim(dest)
if isinstance(verb, str):
if verb == '*':
verb = None
async for walknode in self.walkNodeEdges(runt, node, verb=verb):
if not isDestForm(walknode.form.name, dest):
await asyncio.sleep(0)
continue
yield walknode, path.fork(walknode)
elif isinstance(verb, (list, tuple)):
for verb in verb:
if verb == '*':
verb = None
async for walknode in self.walkNodeEdges(runt, node, verb=verb):
if not isDestForm(walknode.form.name, dest):
await asyncio.sleep(0)
continue
yield walknode, path.fork(walknode)
else:
mesg = f'walk operation expected a string or list. got: {verb!r}.'
raise s_exc.StormRuntimeError(mesg=mesg)
class N2Walk(N1Walk):
async def walkNodeEdges(self, runt, node, verb=None):
async for _, iden in node.iterEdgesN2(verb=verb):
buid = s_common.uhex(iden)
walknode = await runt.snap.getNodeByBuid(buid)
if walknode is not None:
yield walknode
class EditEdgeAdd(Edit):
def __init__(self, kids=(), n2=False):
Edit.__init__(self, kids=kids)
self.n2 = n2
async def run(self, runt, genr):
# SubQuery -> Query
query = self.kids[1].kids[0]
hits = set()
def allowed(x):
if x in hits:
return
runt.layerConfirm(('node', 'edge', 'add', x))
hits.add(x)
async for node, path in genr:
iden = node.iden()
verb = await self.kids[0].compute(path)
# TODO this will need a toprim once Str is in play
allowed(verb)
varz = {}
varz.update(runt.vars)
varz.update(path.vars)
opts = {
'vars': varz,
}
with runt.snap.getStormRuntime(opts=opts, user=runt.user) as runt:
# TODO perhaps chunk the edge edits?
async for subn, subp in runt.iterStormQuery(query):
if self.n2:
await subn.addEdge(verb, iden)
else:
await node.addEdge(verb, subn.iden())
yield node, path
class EditEdgeDel(Edit):
def __init__(self, kids=(), n2=False):
Edit.__init__(self, kids=kids)
self.n2 = n2
async def run(self, runt, genr):
query = self.kids[1].kids[0]
hits = set()
def allowed(x):
if x in hits:
return
runt.layerConfirm(('node', 'edge', 'del', x))
hits.add(x)
async for node, path in genr:
iden = node.iden()
verb = await self.kids[0].compute(path)
# TODO this will need a toprim once Str is in play
allowed(verb)
varz = {}
varz.update(runt.vars)
varz.update(path.vars)
opts = {
'vars': varz,
}
with runt.snap.getStormRuntime(opts=opts, user=runt.user) as runt:
# TODO perhaps chunk the edge edits?
async for subn, subp in runt.iterStormQuery(query):
if self.n2:
await subn.delEdge(verb, iden)
else:
await node.delEdge(verb, subn.iden())
yield node, path
class EditTagAdd(Edit):
async def run(self, runt, genr):
if len(self.kids) > 1 and isinstance(self.kids[0], Const) and self.kids[0].value() == '?':
oper_offset = 1
else:
| |
"""OpenMC transport operator
This module implements a transport operator for OpenMC so that it can be used by
depletion integrators. The implementation makes use of the Python bindings to
OpenMC's C API so that reading tally results and updating material number
densities is all done in-memory instead of through the filesystem.
"""
import copy
from collections import OrderedDict
import os
import xml.etree.ElementTree as ET
from warnings import warn
from pathlib import Path
import numpy as np
from uncertainties import ufloat
import openmc
from openmc.checkvalue import check_value
import openmc.lib
from openmc.mpi import comm
from .abc import TransportOperator, OperatorResult
from .atom_number import AtomNumber
from .reaction_rates import ReactionRates
from .results_list import ResultsList
from .helpers import (
DirectReactionRateHelper, ChainFissionHelper, ConstantFissionYieldHelper,
FissionYieldCutoffHelper, AveragedFissionYieldHelper, EnergyScoreHelper,
SourceRateHelper, FluxCollapseHelper)
__all__ = ["Operator", "OperatorResult"]
def _distribute(items):
"""Distribute items across MPI communicator
Parameters
----------
items : list
List of items to distribute
Returns
-------
list
Items assigned to the process that called this function
"""
min_size, extra = divmod(len(items), comm.size)
j = 0
for i in range(comm.size):
chunk_size = min_size + int(i < extra)
if comm.rank == i:
return items[j:j + chunk_size]
j += chunk_size
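# Illustrative sketch (hypothetical ranks): with comm.size == 3 and
# items == ["mat1", "mat2", "mat3", "mat4"], rank 0 is assigned the first two
# materials while ranks 1 and 2 receive one material each.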
class Operator(TransportOperator):
"""OpenMC transport operator for depletion.
Instances of this class can be used to perform depletion using OpenMC as the
transport operator. Normally, a user needn't call methods of this class
directly. Instead, an instance of this class is passed to an integrator
class, such as :class:`openmc.deplete.CECMIntegrator`.
Parameters
----------
geometry : openmc.Geometry
OpenMC geometry object
settings : openmc.Settings
OpenMC Settings object
chain_file : str, optional
Path to the depletion chain XML file. Defaults to the file
listed under ``depletion_chain`` in
:envvar:`OPENMC_CROSS_SECTIONS` environment variable.
prev_results : ResultsList, optional
Results from a previous depletion calculation. If this argument is
specified, the depletion calculation will start from the latest state
in the previous results.
diff_burnable_mats : bool, optional
Whether to differentiate burnable materials with multiple instances.
Volumes are divided equally from the original material volume.
Default: False.
normalization_mode : {"energy-deposition", "fission-q", "source-rate"}
Indicate how tally results should be normalized. ``"energy-deposition"``
computes the total energy deposited in the system and uses the ratio of
the power to the energy produced as a normalization factor.
``"fission-q"`` uses the fission Q values from the depletion chain to
compute the total energy deposited. ``"source-rate"`` normalizes
tallies based on the source rate (for fixed source calculations).
fission_q : dict, optional
Dictionary of nuclides and their fission Q values [eV]. If not given,
values will be pulled from the ``chain_file``. Only applicable
if ``"normalization_mode" == "fission-q"``
dilute_initial : float, optional
Initial atom density [atoms/cm^3] to add for nuclides that are zero
in initial condition to ensure they exist in the decay chain.
Only done for nuclides with reaction rates.
Defaults to 1.0e3.
fission_yield_mode : {"constant", "cutoff", "average"}
Key indicating what fission product yield scheme to use. The
key determines what fission energy helper is used:
* "constant": :class:`~openmc.deplete.helpers.ConstantFissionYieldHelper`
* "cutoff": :class:`~openmc.deplete.helpers.FissionYieldCutoffHelper`
* "average": :class:`~openmc.deplete.helpers.AveragedFissionYieldHelper`
The documentation on these classes describe their methodology
and differences. Default: ``"constant"``
fission_yield_opts : dict of str to option, optional
Optional arguments to pass to the helper determined by
``fission_yield_mode``. Will be passed directly on to the
helper. Passing a value of None will use the defaults for
the associated helper.
reaction_rate_mode : {"direct", "flux"}, optional
Indicate how one-group reaction rates should be calculated. The "direct"
method tallies transmutation reaction rates directly. The "flux" method
tallies a multigroup flux spectrum and then collapses one-group reaction
rates after a transport solve (with an option to tally some reaction
rates directly).
.. versionadded:: 0.12.1
reaction_rate_opts : dict, optional
Keyword arguments that are passed to the reaction rate helper class.
When ``reaction_rate_mode`` is set to "flux", energy group boundaries
can be set using the "energies" key. See the
:class:`~openmc.deplete.helpers.FluxCollapseHelper` class for all
options.
.. versionadded:: 0.12.1
reduce_chain : bool, optional
If True, use :meth:`openmc.deplete.Chain.reduce` to reduce the
depletion chain up to ``reduce_chain_level``. Default is False.
.. versionadded:: 0.12
reduce_chain_level : int, optional
Depth of the search when reducing the depletion chain. Only used
if ``reduce_chain`` evaluates to true. The default value of
``None`` implies no limit on the depth.
.. versionadded:: 0.12
Attributes
----------
geometry : openmc.Geometry
OpenMC geometry object
settings : openmc.Settings
OpenMC settings object
dilute_initial : float
Initial atom density [atoms/cm^3] to add for nuclides that
are zero in initial condition to ensure they exist in the decay
chain. Only done for nuclides with reaction rates.
output_dir : pathlib.Path
Path to output directory to save results.
round_number : bool
Whether or not to round output to OpenMC to 8 digits.
Useful in testing, as OpenMC is incredibly sensitive to exact values.
number : openmc.deplete.AtomNumber
Total number of atoms in simulation.
nuclides_with_data : set of str
A set listing all unique nuclides available from cross_sections.xml.
chain : openmc.deplete.Chain
The depletion chain information necessary to form matrices and tallies.
reaction_rates : openmc.deplete.ReactionRates
Reaction rates from the last operator step.
burnable_mats : list of str
All burnable material IDs
heavy_metal : float
Initial heavy metal inventory [g]
local_mats : list of str
All burnable material IDs being managed by a single process
prev_res : ResultsList or None
Results from a previous depletion calculation. ``None`` if no
results are to be used.
diff_burnable_mats : bool
Whether to differentiate burnable materials with multiple instances
cleanup_when_done : bool
Whether to finalize and clear the shared library memory when the
depletion operation is complete. Defaults to clearing the library.
"""
_fission_helpers = {
"average": AveragedFissionYieldHelper,
"constant": ConstantFissionYieldHelper,
"cutoff": FissionYieldCutoffHelper,
}
def __init__(self, geometry, settings, chain_file=None, prev_results=None,
diff_burnable_mats=False, normalization_mode="fission-q",
fission_q=None, dilute_initial=1.0e3,
fission_yield_mode="constant", fission_yield_opts=None,
reaction_rate_mode="direct", reaction_rate_opts=None,
reduce_chain=False, reduce_chain_level=None):
check_value('fission yield mode', fission_yield_mode,
self._fission_helpers.keys())
check_value('normalization mode', normalization_mode,
('energy-deposition', 'fission-q', 'source-rate'))
if normalization_mode != "fission-q":
if fission_q is not None:
warn("Fission Q dictionary will not be used")
fission_q = None
super().__init__(chain_file, fission_q, dilute_initial, prev_results)
self.round_number = False
self.settings = settings
self.geometry = geometry
self.diff_burnable_mats = diff_burnable_mats
self.cleanup_when_done = True
# Reduce the chain before we create more materials
if reduce_chain:
all_isotopes = set()
for material in geometry.get_all_materials().values():
if not material.depletable:
continue
for name, _dens_percent, _dens_type in material.nuclides:
all_isotopes.add(name)
self.chain = self.chain.reduce(all_isotopes, reduce_chain_level)
# Differentiate burnable materials with multiple instances
if self.diff_burnable_mats:
self._differentiate_burnable_mats()
# Clear out OpenMC, create task lists, distribute
openmc.reset_auto_ids()
self.burnable_mats, volume, nuclides = self._get_burnable_mats()
self.local_mats = _distribute(self.burnable_mats)
# Generate map from local materials => material index
self._mat_index_map = {
lm: self.burnable_mats.index(lm) for lm in self.local_mats}
if self.prev_res is not None:
# Reload volumes into geometry
prev_results[-1].transfer_volumes(geometry)
# Store previous results in operator
# Distribute reaction rates according to those tracked
# on this process
if comm.size == 1:
self.prev_res = prev_results
else:
self.prev_res = ResultsList()
mat_indexes = _distribute(range(len(self.burnable_mats)))
for res_obj in prev_results:
new_res = res_obj.distribute(self.local_mats, mat_indexes)
self.prev_res.append(new_res)
# Determine which nuclides have incident neutron data
self.nuclides_with_data = self._get_nuclides_with_data()
# Select nuclides with data that are also in the chain
self._burnable_nucs = [nuc.name for nuc in self.chain.nuclides
if nuc.name in self.nuclides_with_data]
# Extract number densities from the geometry / previous depletion run
self._extract_number(self.local_mats, volume, nuclides, self.prev_res)
# Create reaction rates array
self.reaction_rates = ReactionRates(
self.local_mats, self._burnable_nucs, self.chain.reactions)
# Get classes to assist working with tallies
if reaction_rate_mode == "direct":
self._rate_helper = DirectReactionRateHelper(
self.reaction_rates.n_nuc, self.reaction_rates.n_react)
elif reaction_rate_mode == "flux":
if reaction_rate_opts is None:
reaction_rate_opts = {}
# Ensure energy group boundaries were specified
if 'energies' not in reaction_rate_opts:
raise ValueError(
"Energy group boundaries must be specified in the "
"reaction_rate_opts argument when reaction_rate_mode is"
"set to 'flux'.")
self._rate_helper = FluxCollapseHelper(
self.reaction_rates.n_nuc,
self.reaction_rates.n_react,
**reaction_rate_opts
)
else:
raise ValueError("Invalid reaction rate mode.")
if normalization_mode == "fission-q":
self._normalization_helper = ChainFissionHelper()
elif normalization_mode == "energy-deposition":
score = "heating" if settings.photon_transport else "heating-local"
self._normalization_helper = EnergyScoreHelper(score)
else:
self._normalization_helper = SourceRateHelper()
# Select and create fission yield helper
fission_helper = self._fission_helpers[fission_yield_mode]
fission_yield_opts = (
{} if fission_yield_opts is None else fission_yield_opts)
self._yield_helper = fission_helper.from_operator(
self, **fission_yield_opts)
def __call__(self, vec, source_rate):
"""Runs a simulation.
Simulation will abort under the following circumstances:
1) No energy is computed using OpenMC tallies.
Parameters
----------
vec : list of numpy.ndarray
are not correct")
values = [_[1] for _ in values[i1:i2]]
return self._private_getclass()(self.header, values)
def union(self, table):
"""
@param table table
@return table (with the same number of columns)
concatenates two tables by rows; both tables must have the same header, and the rows of both tables are merged into a single matrix
Example:
::
union = table.union(table2)
"""
if len(self.header) != len(table.header):
raise ValueError( # pragma: no cover
"tables do not have the same number of columns\ntbl1: %s\ntbl2: %s" % (
",".join(self.header), ",".join(table.header)))
for a, b in zip(self.header, table.header):
if a != b:
raise ValueError( # pragma: no cover
"tables do not have the same column names")
return self._private_getclass()(self.header, self.values + table.values)
def concatenate(self, table, addPrefix=""):
"""
concatenates two tables by columns
@param table table
@param addPrefix add a prefix to each column from table
@return table (with the same number of rows as the longest one)
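Example:
::
conc = table.concatenate(table2, addPrefix="t2_")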
"""
maxr = max(len(self), len(table))
header = self.header + [addPrefix + h for h in table.header]
values = []
for i in range(0, maxr):
r1 = self.values[i] if i < len(self) else [None] * len(self.header)
r2 = table.values[i] if i < len(
table) else [None] * len(table.header)
values.append(r1 + r2)
return self._private_getclass()(header, values)
def random(self, n, unique=False):
"""
select n random row from the table, returns a table
@param n number of desired random rows
@param unique draws unique rows or non unique rows
(i.e. sampling without or with replacement)
@return a table
Example:
::
rnd = table.random(10)
"""
if unique:
if n > len(self):
raise ValueError( # pragma: no cover
"number of desired random rows is higher "
"than the number of rows in the table")
index = {}
while len(index) < n:
h = random.randint(0, len(self) - 1)
index[h] = 0
values = [self.values[h] for h in index]
return self._private_getclass()(self.header, values)
else:
values = []
for i in range(0, n):
h = random.randint(0, len(self) - 1)
values.append(self.values[h])
return self._private_getclass()(self.header, values)
def todict(self, functionKey, functionValue, useList=False):
"""
convert the table as a dictionary { key:value }
each of them is defined by functions.
@param functionKey defines the key
@param functionValue defines the value
@param useList if multiple rows share the same key, it should be true;
all values are then stored in a list
@return a dictionary { key:row } or { key: [row1, row2, ...] }
Example:
::
d = table.todict(lambda v: v["name"], lambda v: v["d_b"], True)
"""
res = {}
if useList:
for row in self.values:
v = self._interpret_row(row)
key = functionKey(v)
val = functionValue(v)
if key in res:
res[key].append(val)
else:
res[key] = [val]
else:
for row in self.values:
v = self._interpret_row(row)
key = functionKey(v)
val = functionValue(v)
res[key] = val
return res
def reduce_dict(self, functionKey, functionValue, uselist=False):
"""
@see me todict
"""
return self.todict(functionKey, functionValue, uselist)
def select(self, functionRow):
"""
@param functionRow function applied to each row; its result is yielded
@return table
Example:
::
d = table.select(lambda v:(v["name"], v["d_b"]))
print(list(d))
"""
for row in self.values:
v = self._interpret_row(row)
nr = functionRow(v)
yield nr
def modify_all(self, modification_function):
"""
apply the same modification to every value
@param modification_function modification to apply to every value (it receives the value and its column name)
@return new table
The signature of the function is the following one:
::
def function(value, column_name):
# ....
return new_value
Example:
::
tbl = tbl.modify_all(lambda v,c: {"string":"", "numerical":0}.get(c,None) if v is None else v)
"""
values = []
for row in self.values:
r = []
for v, h in zip(row, self.header):
r.append(modification_function(v, h))
values.append(r)
return self._private_getclass()(self.header, values)
def dcast(self, functionKey, functionInstance, full=True):
"""
@see me multiply_column_by_row_instance
"""
return self.multiply_column_by_row_instance(functionKey, functionInstance, full)
def multiply_column_by_row_instance(self, functionKey, functionInstance, full=True):
"""
@param functionKey defines a key(function)
@param functionInstance defines a second key(will be moved to the columns dimension)
@param full introduces missing values for not found combinations
@return a table
If a column contains a finite set of values, for example,
we have the temperature for several cities organized as if
it were a table from a database: city, date, temperature.
We would like to get another table of the form:
date temperature_city1 temperature_city2...
Then we would type:
Example:
::
mul = table.multiply_column_by_row_instance(
lambda v: v["date"],
lambda v: v["city"])
The input table would be like:
::
city date
A jan
A feb
B feb
It returns:
::
KEY A|city A|date B|city B|date
feb A feb B feb
jan A jan None None
"""
values = [functionInstance(self._interpret_row(row))
for row in self.values]
distinct = {}
for v in values:
distinct[v] = 0
distinct = [_ for _ in distinct]
distinct.sort()
table1 = copy.deepcopy(self)
table = None
header = copy.copy(table1.header)
orig = len(header)
nameKey = "~KEY~"
while nameKey in header:
nameKey += "*"
nbJoin = 0
for val in distinct:
table2 = table1.filter(
lambda v, val=val: functionInstance(v) == val)
if table is None:
table = table2.copy()
else:
colkey = table.header[0]
table = table.innerjoin(table2, functionKey if nbJoin == 0 else (lambda v, c=colkey: v[c]),
functionKey, nameKey=nameKey,
prefixToAdd=str(val) + "|",
full=full, keepKey=nbJoin == 0,
putKeyInColumn=None if nbJoin == 0 else 0,
uniqueKey=True)
if nbJoin == 0:
head = []
nb = 0
for h in table.header:
if not h.endswith("~") and nb < orig:
head.append("%s|%s" % (distinct[0], h))
nb += 1
else:
head.append(h)
header = ["KEY"] + head[1:]
table = self._private_getclass()(header, table.values)
nbJoin += 1
if nbJoin == 0:
head = []
nb = 0
for h in table.header:
if not h.endswith("~") and nb < orig:
head.append("%s|%s" % (distinct[0], h))
nb += 1
else:
head.append(h)
values = []
for row in self.values:
v = self._interpret_row(row)
r = [functionKey(v)] + row
values.append(r)
header = ["KEY"] + head
table = self._private_getclass()(header, values)
return table
def create_index(self, functionIndex):
"""
this method creates an index;
to get an indexed row, use method get
Example:
::
table.create_index(lambda v:(v["name"], v["d_a"]))
row = table.get(('A', 1.1))
value = table.get(('A', 1.1), 2)
"""
self.indexspecial = {}
for row in self.values:
v = self._interpret_row(row)
nr = functionIndex(v)
if nr in self.indexspecial:
raise KeyError(
"unable to add %s because it is already present" % str(nr))
self.indexspecial[nr] = row
return self
def get(self, rowIndex, column=None):
"""
use the index created by method create_index
Example:
::
table.create_index(lambda v:(v["name"], v["d_a"]))
row = table.get(('A', 1.1))
value = table.get(('A', 1.1), 2)
"""
if "indexspecial" not in self.__dict__:
raise Exception("no index was created")
row = self.indexspecial[rowIndex]
if column is None:
return row
elif isinstance(column, int):
return row[column]
else:
return row[self.index[column]]
def avg_std(self, functionValue, functionWeight=lambda v: 1):
"""
returns the weighted average and standard deviation of functionValue(row),
using functionWeight(row) as the weight of each row
(the variance is computed as E[x^2] - E[x]^2)
"""
avg = 0.
std = 0.
n = 0.
for i, row in enumerate(self.values):
v = self._interpret_row(row)
x = float(functionValue(v))
w = functionWeight(v)
avg += x * w
std += x * x * w
n += w
if n != 0:
avg /= n
std /= n
std -= avg * avg
std = math.sqrt(std)
else:
avg = 0.
std = 0.
return avg, std
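# Worked example (hypothetical column name): for rows whose "price" values are
# 1, 2 and 3 with the default unit weights, avg_std(lambda v: v["price"])
# returns (2.0, sqrt(2/3)), since E[x] = 2 and E[x^2] - E[x]^2 = 14/3 - 4 = 2/3.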
def add_column_cumulative(self, column_index, column_name, functionIndex, functionValue,
normalize=False, reverse=False, cumulative=True, functionSort=None):
"""
also called the Gini function
Example:
::
table.add_column_cumulative("index_%s" % col, "dist_%s" % col,
lambda v: v["sum_nbclient"], lambda v: v[col],
functionSort = lambda v: v [col] / v["sum_nbclient"],
normalize=True)
"""
if functionSort is None:
functionSort = functionValue
val = []
for row in self.values:
v = self._interpret_row(row)
i = functionIndex(v)
s = functionSort(v)
v = functionValue(v)
val.append((s, i, v))
val.sort(reverse=reverse)
if cumulative:
res = [(0., 0.)]
for s, i, v in val:
res.append((i + res[-1][0], v + res[-1][1]))
del res[0]
if normalize:
sumi = res[-1][0]
sumv = res[-1][1]
if sumi != 0 and sumv != 0:
res = [(_[0] / sumi, _[1] / sumv) for _ in res]
else:
raise ZeroDivisionError(
"cannot divide by zero, all indexes or all values are null")
else:
res = [(i, v) for s, i, v in val]
if normalize:
sumi = sum([_[0] for _ in res])
sumv = sum([_[1] for _ in res])
if sumi != 0 and sumv != 0:
res = [(_[0] / sumi, _[1] / sumv) for _ in res]
# wasm_bindgen/raze/crates.bzl
"""
@generated
cargo-raze crate workspace functions
DO NOT EDIT! Replaced on runs of cargo-raze
"""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") # buildifier: disable=load
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # buildifier: disable=load
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe") # buildifier: disable=load
def rules_rust_wasm_bindgen_fetch_remote_crates():
"""This function defines a collection of repos and should be called in a WORKSPACE file"""
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__backtrace_sys__0_1_29",
url = "https://crates.io/api/v1/crates/backtrace-sys/0.1.29/download",
type = "tar.gz",
sha256 = "12cb9f1eef1d1fc869ad5a26c9fa48516339a15e54a227a25460fc304815fdb3",
strip_prefix = "backtrace-sys-0.1.29",
build_file = Label("//wasm_bindgen/raze/remote:backtrace-sys-0.1.29.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__bumpalo__2_6_0",
url = "https://crates.io/api/v1/crates/bumpalo/2.6.0/download",
type = "tar.gz",
sha256 = "ad807f2fc2bf185eeb98ff3a901bd46dc5ad58163d0fa4577ba0d25674d71708",
strip_prefix = "bumpalo-2.6.0",
build_file = Label("//wasm_bindgen/raze/remote:bumpalo-2.6.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__cc__1_0_60",
url = "https://crates.io/api/v1/crates/cc/1.0.60/download",
type = "tar.gz",
sha256 = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c",
strip_prefix = "cc-1.0.60",
build_file = Label("//wasm_bindgen/raze/remote:cc-1.0.60.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__cfg_if__0_1_10",
url = "https://crates.io/api/v1/crates/cfg-if/0.1.10/download",
type = "tar.gz",
sha256 = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822",
strip_prefix = "cfg-if-0.1.10",
build_file = Label("//wasm_bindgen/raze/remote:cfg-if-0.1.10.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__lazy_static__1_4_0",
url = "https://crates.io/api/v1/crates/lazy_static/1.4.0/download",
type = "tar.gz",
sha256 = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646",
strip_prefix = "lazy_static-1.4.0",
build_file = Label("//wasm_bindgen/raze/remote:lazy_static-1.4.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__libc__0_2_77",
url = "https://crates.io/api/v1/crates/libc/0.2.77/download",
type = "tar.gz",
sha256 = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235",
strip_prefix = "libc-0.2.77",
build_file = Label("//wasm_bindgen/raze/remote:libc-0.2.77.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__log__0_4_11",
url = "https://crates.io/api/v1/crates/log/0.4.11/download",
type = "tar.gz",
sha256 = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b",
strip_prefix = "log-0.4.11",
build_file = Label("//wasm_bindgen/raze/remote:log-0.4.11.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__proc_macro2__0_4_30",
url = "https://crates.io/api/v1/crates/proc-macro2/0.4.30/download",
type = "tar.gz",
sha256 = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759",
strip_prefix = "proc-macro2-0.4.30",
build_file = Label("//wasm_bindgen/raze/remote:proc-macro2-0.4.30.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__quote__0_6_13",
url = "https://crates.io/api/v1/crates/quote/0.6.13/download",
type = "tar.gz",
sha256 = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1",
strip_prefix = "quote-0.6.13",
build_file = Label("//wasm_bindgen/raze/remote:quote-0.6.13.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__syn__0_15_44",
url = "https://crates.io/api/v1/crates/syn/0.15.44/download",
type = "tar.gz",
sha256 = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5",
strip_prefix = "syn-0.15.44",
build_file = Label("//wasm_bindgen/raze/remote:syn-0.15.44.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__unicode_xid__0_1_0",
url = "https://crates.io/api/v1/crates/unicode-xid/0.1.0/download",
type = "tar.gz",
sha256 = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc",
strip_prefix = "unicode-xid-0.1.0",
build_file = Label("//wasm_bindgen/raze/remote:unicode-xid-0.1.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__wasm_bindgen__0_2_48",
url = "https://crates.io/api/v1/crates/wasm-bindgen/0.2.48/download",
type = "tar.gz",
sha256 = "4de97fa1806bb1a99904216f6ac5e0c050dc4f8c676dc98775047c38e5c01b55",
strip_prefix = "wasm-bindgen-0.2.48",
build_file = Label("//wasm_bindgen/raze/remote:wasm-bindgen-0.2.48.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__wasm_bindgen_backend__0_2_48",
url = "https://crates.io/api/v1/crates/wasm-bindgen-backend/0.2.48/download",
type = "tar.gz",
sha256 = "5d82c170ef9f5b2c63ad4460dfcee93f3ec04a9a36a4cc20bc973c39e59ab8e3",
strip_prefix = "wasm-bindgen-backend-0.2.48",
build_file = Label("//wasm_bindgen/raze/remote:wasm-bindgen-backend-0.2.48.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__wasm_bindgen_macro__0_2_48",
url = "https://crates.io/api/v1/crates/wasm-bindgen-macro/0.2.48/download",
type = "tar.gz",
sha256 = "f07d50f74bf7a738304f6b8157f4a581e1512cd9e9cdb5baad8c31bbe8ffd81d",
strip_prefix = "wasm-bindgen-macro-0.2.48",
build_file = Label("//wasm_bindgen/raze/remote:wasm-bindgen-macro-0.2.48.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__wasm_bindgen_macro_support__0_2_48",
url = "https://crates.io/api/v1/crates/wasm-bindgen-macro-support/0.2.48/download",
type = "tar.gz",
sha256 = "95cf8fe77e45ba5f91bc8f3da0c3aa5d464b3d8ed85d84f4d4c7cc106436b1d7",
strip_prefix = "wasm-bindgen-macro-support-0.2.48",
build_file = Label("//wasm_bindgen/raze/remote:wasm-bindgen-macro-support-0.2.48.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__wasm_bindgen_shared__0_2_48",
url = "https://crates.io/api/v1/crates/wasm-bindgen-shared/0.2.48/download",
type = "tar.gz",
sha256 = "d9c2d4d4756b2e46d3a5422e06277d02e4d3e1d62d138b76a4c681e925743623",
strip_prefix = "wasm-bindgen-shared-0.2.48",
build_file = Label("//wasm_bindgen/raze/remote:wasm-bindgen-shared-0.2.48.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__addr2line__0_13_0",
url = "https://crates.io/api/v1/crates/addr2line/0.13.0/download",
type = "tar.gz",
sha256 = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072",
strip_prefix = "addr2line-0.13.0",
build_file = Label("//wasm_bindgen/raze/remote:addr2line-0.13.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__adler__0_2_3",
url = "https://crates.io/api/v1/crates/adler/0.2.3/download",
type = "tar.gz",
sha256 = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e",
strip_prefix = "adler-0.2.3",
build_file = Label("//wasm_bindgen/raze/remote:adler-0.2.3.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__aho_corasick__0_7_14",
url = "https://crates.io/api/v1/crates/aho-corasick/0.7.14/download",
type = "tar.gz",
sha256 = "b476ce7103678b0c6d3d395dbbae31d48ff910bd28be979ba5d48c6351131d0d",
strip_prefix = "aho-corasick-0.7.14",
build_file = Label("//wasm_bindgen/raze/remote:aho-corasick-0.7.14.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__arrayref__0_3_6",
url = "https://crates.io/api/v1/crates/arrayref/0.3.6/download",
type = "tar.gz",
sha256 = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544",
strip_prefix = "arrayref-0.3.6",
build_file = Label("//wasm_bindgen/raze/remote:arrayref-0.3.6.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__arrayvec__0_5_1",
url = "https://crates.io/api/v1/crates/arrayvec/0.5.1/download",
type = "tar.gz",
sha256 = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8",
strip_prefix = "arrayvec-0.5.1",
build_file = Label("//wasm_bindgen/raze/remote:arrayvec-0.5.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__ascii__0_8_7",
url = "https://crates.io/api/v1/crates/ascii/0.8.7/download",
type = "tar.gz",
sha256 = "97be891acc47ca214468e09425d02cef3af2c94d0d82081cd02061f996802f14",
strip_prefix = "ascii-0.8.7",
build_file = Label("//wasm_bindgen/raze/remote:ascii-0.8.7.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__assert_cmd__0_11_1",
url = "https://crates.io/api/v1/crates/assert_cmd/0.11.1/download",
type = "tar.gz",
sha256 = "2dc477793bd82ec39799b6f6b3df64938532fdf2ab0d49ef817eac65856a5a1e",
strip_prefix = "assert_cmd-0.11.1",
build_file = Label("//wasm_bindgen/raze/remote:assert_cmd-0.11.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__atty__0_2_14",
url = "https://crates.io/api/v1/crates/atty/0.2.14/download",
type = "tar.gz",
sha256 = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8",
strip_prefix = "atty-0.2.14",
build_file = Label("//wasm_bindgen/raze/remote:atty-0.2.14.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__autocfg__0_1_7",
url = "https://crates.io/api/v1/crates/autocfg/0.1.7/download",
type = "tar.gz",
sha256 = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2",
strip_prefix = "autocfg-0.1.7",
build_file = Label("//wasm_bindgen/raze/remote:autocfg-0.1.7.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__autocfg__1_0_1",
url = "https://crates.io/api/v1/crates/autocfg/1.0.1/download",
type = "tar.gz",
sha256 = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a",
strip_prefix = "autocfg-1.0.1",
build_file = Label("//wasm_bindgen/raze/remote:autocfg-1.0.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__backtrace__0_3_53",
url = "https://crates.io/api/v1/crates/backtrace/0.3.53/download",
type = "tar.gz",
sha256 = "707b586e0e2f247cbde68cdd2c3ce69ea7b7be43e1c5b426e37c9319c4b9838e",
strip_prefix = "backtrace-0.3.53",
build_file = Label("//wasm_bindgen/raze/remote:backtrace-0.3.53.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__base64__0_12_3",
url = "https://crates.io/api/v1/crates/base64/0.12.3/download",
type = "tar.gz",
sha256 = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff",
strip_prefix = "base64-0.12.3",
build_file = Label("//wasm_bindgen/raze/remote:base64-0.12.3.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__base64__0_9_3",
url = "https://crates.io/api/v1/crates/base64/0.9.3/download",
type = "tar.gz",
sha256 = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643",
strip_prefix = "base64-0.9.3",
build_file = Label("//wasm_bindgen/raze/remote:base64-0.9.3.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__bitflags__1_2_1",
url = "https://crates.io/api/v1/crates/bitflags/1.2.1/download",
type = "tar.gz",
sha256 = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693",
strip_prefix = "bitflags-1.2.1",
build_file = Label("//wasm_bindgen/raze/remote:bitflags-1.2.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__blake2b_simd__0_5_10",
url = "https://crates.io/api/v1/crates/blake2b_simd/0.5.10/download",
type = "tar.gz",
sha256 = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a",
strip_prefix = "blake2b_simd-0.5.10",
build_file = Label("//wasm_bindgen/raze/remote:blake2b_simd-0.5.10.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__buf_redux__0_8_4",
url = "https://crates.io/api/v1/crates/buf_redux/0.8.4/download",
type = "tar.gz",
sha256 = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f",
strip_prefix = "buf_redux-0.8.4",
build_file = Label("//wasm_bindgen/raze/remote:buf_redux-0.8.4.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__byteorder__1_3_4",
url = "https://crates.io/api/v1/crates/byteorder/1.3.4/download",
type = "tar.gz",
sha256 = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de",
strip_prefix = "byteorder-1.3.4",
build_file = Label("//wasm_bindgen/raze/remote:byteorder-1.3.4.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__cc__1_0_61",
url = "https://crates.io/api/v1/crates/cc/1.0.61/download",
type = "tar.gz",
sha256 = "ed67cbde08356238e75fc4656be4749481eeffb09e19f320a25237d5221c985d",
strip_prefix = "cc-1.0.61",
build_file = Label("//wasm_bindgen/raze/remote:cc-1.0.61.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__cfg_if__0_1_10",
url = "https://crates.io/api/v1/crates/cfg-if/0.1.10/download",
type = "tar.gz",
sha256 = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822",
strip_prefix = "cfg-if-0.1.10",
build_file = Label("//wasm_bindgen/raze/remote:cfg-if-0.1.10.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__cfg_if__1_0_0",
url = "https://crates.io/api/v1/crates/cfg-if/1.0.0/download",
type = "tar.gz",
sha256 = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd",
strip_prefix = "cfg-if-1.0.0",
build_file = Label("//wasm_bindgen/raze/remote:cfg-if-1.0.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__chrono__0_4_19",
url = "https://crates.io/api/v1/crates/chrono/0.4.19/download",
type = "tar.gz",
sha256 = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73",
strip_prefix = "chrono-0.4.19",
build_file = Label("//wasm_bindgen/raze/remote:chrono-0.4.19.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__chunked_transfer__0_3_1",
url = "https://crates.io/api/v1/crates/chunked_transfer/0.3.1/download",
type = "tar.gz",
sha256 = "498d20a7aaf62625b9bf26e637cf7736417cde1d0c99f1d04d1170229a85cf87",
strip_prefix = "chunked_transfer-0.3.1",
build_file = Label("//wasm_bindgen/raze/remote:chunked_transfer-0.3.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__cloudabi__0_0_3",
url = "https://crates.io/api/v1/crates/cloudabi/0.0.3/download",
type = "tar.gz",
sha256 = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f",
strip_prefix = "cloudabi-0.0.3",
build_file = Label("//wasm_bindgen/raze/remote:cloudabi-0.0.3.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__constant_time_eq__0_1_5",
url = "https://crates.io/api/v1/crates/constant_time_eq/0.1.5/download",
type = "tar.gz",
sha256 = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc",
strip_prefix = "constant_time_eq-0.1.5",
build_file = Label("//wasm_bindgen/raze/remote:constant_time_eq-0.1.5.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__crossbeam_channel__0_4_4",
url = "https://crates.io/api/v1/crates/crossbeam-channel/0.4.4/download",
type = "tar.gz",
sha256 = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87",
strip_prefix = "crossbeam-channel-0.4.4",
build_file = Label("//wasm_bindgen/raze/remote:crossbeam-channel-0.4.4.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__crossbeam_deque__0_7_3",
url = "https://crates.io/api/v1/crates/crossbeam-deque/0.7.3/download",
type = "tar.gz",
sha256 = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285",
strip_prefix = "crossbeam-deque-0.7.3",
build_file = Label("//wasm_bindgen/raze/remote:crossbeam-deque-0.7.3.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__crossbeam_epoch__0_8_2",
url = "https://crates.io/api/v1/crates/crossbeam-epoch/0.8.2/download",
type = "tar.gz",
sha256 = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace",
strip_prefix = "crossbeam-epoch-0.8.2",
build_file = Label("//wasm_bindgen/raze/remote:crossbeam-epoch-0.8.2.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__crossbeam_utils__0_7_2",
url = "https://crates.io/api/v1/crates/crossbeam-utils/0.7.2/download",
type = "tar.gz",
sha256 = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8",
strip_prefix = "crossbeam-utils-0.7.2",
build_file = Label("//wasm_bindgen/raze/remote:crossbeam-utils-0.7.2.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__curl__0_4_34",
url = "https://crates.io/api/v1/crates/curl/0.4.34/download",
type = "tar.gz",
sha256 = "e268162af1a5fe89917ae25ba3b0a77c8da752bdc58e7dbb4f15b91fbd33756e",
strip_prefix = "curl-0.4.34",
build_file = Label("//wasm_bindgen/raze/remote:curl-0.4.34.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__curl_sys__0_4_38_curl_7_73_0",
url = "https://crates.io/api/v1/crates/curl-sys/0.4.38+curl-7.73.0/download",
type = "tar.gz",
sha256 = "498ecfb4f59997fd40023d62a9f1e506e768b2baeb59a1d311eb9751cdcd7e3f",
strip_prefix = "curl-sys-0.4.38+curl-7.73.0",
build_file = Label("//wasm_bindgen/raze/remote:curl-sys-0.4.38+curl-7.73.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__difference__2_0_0",
url = "https://crates.io/api/v1/crates/difference/2.0.0/download",
type = "tar.gz",
sha256 = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198",
strip_prefix = "difference-2.0.0",
build_file = Label("//wasm_bindgen/raze/remote:difference-2.0.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__dirs__1_0_5",
url = "https://crates.io/api/v1/crates/dirs/1.0.5/download",
type = "tar.gz",
sha256 = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901",
strip_prefix = "dirs-1.0.5",
build_file = Label("//wasm_bindgen/raze/remote:dirs-1.0.5.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__docopt__1_1_0",
url = "https://crates.io/api/v1/crates/docopt/1.1.0/download",
type = "tar.gz",
sha256 = "7f525a586d310c87df72ebcd98009e57f1cc030c8c268305287a476beb653969",
strip_prefix = "docopt-1.1.0",
build_file = Label("//wasm_bindgen/raze/remote:docopt-1.1.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__either__1_6_1",
url = "https://crates.io/api/v1/crates/either/1.6.1/download",
type = "tar.gz",
sha256 = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457",
strip_prefix = "either-1.6.1",
build_file = Label("//wasm_bindgen/raze/remote:either-1.6.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__env_logger__0_6_2",
url = "https://crates.io/api/v1/crates/env_logger/0.6.2/download",
type = "tar.gz",
sha256 = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3",
strip_prefix = "env_logger-0.6.2",
build_file = Label("//wasm_bindgen/raze/remote:env_logger-0.6.2.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__escargot__0_4_0",
url = "https://crates.io/api/v1/crates/escargot/0.4.0/download",
type = "tar.gz",
sha256 = "ceb9adbf9874d5d028b5e4c5739d22b71988252b25c9c98fe7cf9738bee84597",
strip_prefix = "escargot-0.4.0",
build_file = Label("//wasm_bindgen/raze/remote:escargot-0.4.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__failure__0_1_8",
url = "https://crates.io/api/v1/crates/failure/0.1.8/download",
type = "tar.gz",
sha256 = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86",
strip_prefix = "failure-0.1.8",
build_file = Label("//wasm_bindgen/raze/remote:failure-0.1.8.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__failure_derive__0_1_8",
url = "https://crates.io/api/v1/crates/failure_derive/0.1.8/download",
type = "tar.gz",
sha256 = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4",
strip_prefix = "failure_derive-0.1.8",
build_file = Label("//wasm_bindgen/raze/remote:failure_derive-0.1.8.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__filetime__0_2_12",
url = "https://crates.io/api/v1/crates/filetime/0.2.12/download",
type = "tar.gz",
sha256 = "3ed85775dcc68644b5c950ac06a2b23768d3bc9390464151aaf27136998dcf9e",
strip_prefix = "filetime-0.2.12",
build_file = Label("//wasm_bindgen/raze/remote:filetime-0.2.12.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__float_cmp__0_8_0",
url = "https://crates.io/api/v1/crates/float-cmp/0.8.0/download",
type = "tar.gz",
sha256 = "e1267f4ac4f343772758f7b1bdcbe767c218bbab93bb432acbf5162bbf85a6c4",
strip_prefix = "float-cmp-0.8.0",
build_file = Label("//wasm_bindgen/raze/remote:float-cmp-0.8.0.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__fuchsia_cprng__0_1_1",
url = "https://crates.io/api/v1/crates/fuchsia-cprng/0.1.1/download",
type = "tar.gz",
sha256 = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba",
strip_prefix = "fuchsia-cprng-0.1.1",
build_file = Label("//wasm_bindgen/raze/remote:fuchsia-cprng-0.1.1.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__getrandom__0_1_15",
url = "https://crates.io/api/v1/crates/getrandom/0.1.15/download",
type = "tar.gz",
sha256 = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6",
strip_prefix = "getrandom-0.1.15",
build_file = Label("//wasm_bindgen/raze/remote:getrandom-0.1.15.BUILD"),
)
maybe(
http_archive,
name = "rules_rust_wasm_bindgen__gimli__0_22_0",
url = "https://crates.io/api/v1/crates/gimli/0.22.0/download",
type = "tar.gz",
sha256 = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724",
strip_prefix = "gimli-0.22.0",
build_file = Label("//wasm_bindgen/raze/remote:gimli-0.22.0.BUILD"),
)
maybe(
http_archive,
| |
Disks:",
"File Size", "Capacity", "Device"),
template.format("", "---------", "---------",
"--------------------")]
for file_obj in file_list:
(file_id, file_size,
disk_id, disk_cap, device_str) = self._info_strings_for_file(
file_obj)
href_str = " " + file_obj.get(self.FILE_HREF)
# Truncate to fit in available space
if len(href_str) > href_w:
href_str = href_str[:(href_w-3)] + "..."
if disk_cap or device_str:
str_list.append(template.format(href_str, file_size,
disk_cap, device_str))
else:
str_list.append(template2.format(href_str, file_size))
if verbosity_option == 'verbose':
str_list.append(" File ID: {0}".format(file_id))
if disk_id:
str_list.append(" Disk ID: {0}".format(disk_id))
# Find placeholder disks as well
for disk in disk_list:
file_id = disk.get(self.DISK_FILE_REF)
file_obj = self.find_child(self.references, self.FILE,
attrib={self.FILE_ID: file_id})
if file_obj is not None:
continue # already reported on above
disk_cap_string = pretty_bytes(self.get_capacity_from_disk(disk))
device_item = self.find_item_from_disk(disk)
device_str = self.device_info_str(device_item)
str_list.append(template.format(" (disk placeholder)",
"--",
disk_cap_string,
device_str))
return "\n".join(str_list)
def _info_string_hardware(self, wrapper):
"""Describe hardware subtypes as part of :meth:`info_string`.
Args:
wrapper (textwrap.TextWrapper): Helper object for wrapping
text lines if needed.
Returns:
str: Hardware information string, or None
"""
virtual_system_types = self.system_types
scsi_subtypes = list_union(
*[scsi_ctrl.get_all_values(self.RESOURCE_SUB_TYPE) for
scsi_ctrl in self.hardware.find_all_items('scsi')])
ide_subtypes = list_union(
*[ide_ctrl.get_all_values(self.RESOURCE_SUB_TYPE) for
ide_ctrl in self.hardware.find_all_items('ide')])
eth_subtypes = list_union(
*[eth.get_all_values(self.RESOURCE_SUB_TYPE) for
eth in self.hardware.find_all_items('ethernet')])
if ((virtual_system_types is not None) or
(scsi_subtypes or ide_subtypes or eth_subtypes)):
str_list = ["Hardware Variants:"]
wrapper.subsequent_indent = ' ' * 28
if virtual_system_types is not None:
wrapper.initial_indent = " System types: "
str_list.extend(wrapper.wrap(" ".join(virtual_system_types)))
if scsi_subtypes:
wrapper.initial_indent = " SCSI device types: "
str_list.extend(wrapper.wrap(" ".join(scsi_subtypes)))
if ide_subtypes:
wrapper.initial_indent = " IDE device types: "
str_list.extend(wrapper.wrap(" ".join(ide_subtypes)))
if eth_subtypes:
wrapper.initial_indent = " Ethernet device types: "
str_list.extend(wrapper.wrap(" ".join(eth_subtypes)))
return "\n".join(str_list)
return None
def _info_string_networks(self, verbosity_option, wrapper):
"""Describe virtual networks as part of :meth:`info_string`.
Args:
verbosity_option (str): 'brief', None (default), or 'verbose'
wrapper (textwrap.TextWrapper): Helper object for wrapping
text lines if needed.
Returns:
str: Network information string, or None
"""
if self.network_section is None:
return None
str_list = ["Networks:"]
width = wrapper.width
names = []
descs = []
for network in self.network_section.findall(self.NETWORK):
names.append(network.get(self.NETWORK_NAME))
descs.append(network.findtext(self.NWK_DESC, None))
max_n = max([len(name) for name in names])
max_d = max([len(str(desc)) for desc in descs])
truncate = (max_n + max_d + 6 >= width and
verbosity_option != 'verbose')
wrapper.initial_indent = " "
wrapper.subsequent_indent = ' ' * (5 + max_n)
if truncate:
max_d = width - 6 - max_n
for name, desc in zip(names, descs):
if not desc:
str_list.append(" " + name)
elif truncate and len(desc) > max_d:
str_list.append(' {name:{w}} "{tdesc}..."'.format(
name=name, w=max_n, tdesc=desc[:max_d-3]))
else:
str_list.extend(wrapper.wrap(
'{name:{w}} "{desc}"'.format(name=name, w=max_n,
desc=desc)))
return "\n".join(str_list)
def _info_string_nics(self, verbosity_option, wrapper):
"""Describe NICs as part of :meth:`info_string`.
Args:
verbosity_option (str): 'brief', None (default), or 'verbose'
wrapper (textwrap.TextWrapper): Helper object for wrapping
text lines if needed.
Returns:
str: NIC information string, or None
"""
if verbosity_option == 'brief':
return None
nics = self.hardware.find_all_items('ethernet')
if not nics:
return None
str_list = ["NICs and Associated Networks:"]
wrapper.initial_indent = ' '
wrapper.subsequent_indent = ' '
max_len = max([len(str(nic.get_value(self.ELEMENT_NAME)))
for nic in nics])
max_len = max(max_len, len("<instance 10>"))
template = " {name:{len}} : {nwk}"
for nic in nics:
network_name = nic.get_value(self.CONNECTION)
nic_name = nic.get_value(self.ELEMENT_NAME)
if nic_name is None:
nic_name = "<instance {0}>".format(
nic.get_value(self.INSTANCE_ID))
str_list.append(template.format(name=nic_name,
len=max_len,
nwk=network_name))
if verbosity_option == 'verbose':
desc = nic.get_value(self.ITEM_DESCRIPTION)
if desc is None:
desc = nic.get_value(self.CAPTION)
if desc is not None:
str_list.extend(wrapper.wrap(desc))
return "\n".join(str_list)
def _info_string_environment(self, wrapper):
"""Describe environment for :meth:`info_string`.
Args:
wrapper (textwrap.TextWrapper): Helper object for wrapping
text lines if needed.
Returns:
str: Environment information string, or None
"""
if not self.environment_transports:
return None
str_list = ["Environment:"]
wrapper.initial_indent = ' '
wrapper.subsequent_indent = ' '
str_list.extend(wrapper.wrap(
"Transport types: {0}"
.format(" ".join(self.environment_transports))))
return "\n".join(str_list)
def _info_string_properties(self, verbosity_option, wrapper):
"""Describe config properties for :meth:`info_string`.
Args:
verbosity_option (str): 'brief', None (default), or 'verbose'
wrapper (textwrap.TextWrapper): Helper object for wrapping
text lines if needed.
Returns:
str: Property information string, or None
"""
properties = self.environment_properties
if not properties:
return None
str_list = ["Properties:"]
max_key = 2 + max([len(str(ph['key'])) for ph in properties])
max_label = max([len(str(ph['label'])) for ph in properties])
max_value = max([len(str(ph['value'])) for ph in properties])
width = wrapper.width
if all(ph['label'] for ph in properties):
max_width = max_label
else:
max_width = max(max_key, max_label)
wrapper.initial_indent = ' '
wrapper.subsequent_indent = ' '
for propdict in properties:
# If we have a label, and the terminal is wide enough,
# display "<key> label value", else if no label, display
# "<key> value", else only display "label value"
if max_label > 0 and (max_key + max_label + max_value <
width - 8):
format_str = ' {key:{kw}} {label:{lw}} {val}'
str_list.append(format_str.format(
key="<{0}>".format(propdict['key']),
kw=max_key,
label=propdict['label'],
lw=max_label,
val=('"{0}"'.format(propdict['value'])
if propdict['value'] is not None
else '--')))
else:
str_list.append(' {label:{width}} {val}'.format(
label=(propdict['label'] if propdict['label']
else "<{0}>".format(propdict['key'])),
width=max_width,
val=('"{0}"'.format(propdict['value'])
if propdict['value'] is not None
else '--')))
if verbosity_option == 'verbose':
for line in propdict['description'].splitlines():
if not line:
str_list.append("")
else:
str_list.extend(wrapper.wrap(line))
return "\n".join(str_list)
def info_string(self, width=79, verbosity_option=None):
"""Get a descriptive string summarizing the contents of this OVF.
Args:
width (int): Line length to wrap to where possible.
verbosity_option (str): 'brief', None (default), or 'verbose'
Returns:
str: Wrapped, appropriately verbose string.
"""
# Supposedly it's quicker to construct a list of strings then merge
# them all together with 'join()' rather than it is to repeatedly
# append to an existing string with '+'.
# I haven't profiled this to verify - it's fast enough for now.
# Don't break in mid-word or on hyphens, as the usual case where
# we may exceed the available width is URI literals, and there's
# no ideal way to wrap these.
wrapper = textwrap.TextWrapper(width=width,
break_long_words=False,
break_on_hyphens=False)
# File description
header = self._info_string_header(width)
section_list = [
self._info_string_product(verbosity_option, wrapper),
self._info_string_annotation(wrapper),
self._info_string_eula(verbosity_option, wrapper),
self._info_string_files_disks(width, verbosity_option),
self._info_string_hardware(wrapper),
self.profile_info_string(width, verbosity_option),
self._info_string_networks(verbosity_option, wrapper),
self._info_string_nics(verbosity_option, wrapper),
self._info_string_environment(wrapper),
self._info_string_properties(verbosity_option, wrapper)
]
# Discard empty sections
section_list = [s for s in section_list if s]
return header + '\n' + "\n\n".join(section_list)
def device_info_str(self, device_item):
"""Get a one-line summary of a hardware device.
Args:
device_item (OVFItem): Device to summarize
Returns:
str: Descriptive string such as "harddisk @ IDE 1:0"
"""
if device_item is None:
return ""
controller_item = self.find_parent_from_item(device_item)
if controller_item is None:
ctrl_type = "(?)"
ctrl_addr = "?"
else:
ctrl_type = controller_item.hardware_type.upper()
ctrl_addr = controller_item.get_value(self.ADDRESS)
return "{0} @ {1} {2}:{3}".format(
device_item.hardware_type,
ctrl_type,
ctrl_addr,
device_item.get_value(self.ADDRESS_ON_PARENT))
PROFILE_INFO_TEMPLATE = (
"{{0:{0}}} " # profile name - width is dynamically set
"{{1:>4}} " # CPUs - width 4 for "CPUs"
"{{2:>9}} " # memory - width 9 for "999.9 MiB"
"{{3:>4}} " # NICs - width 4 for "NICs"
"{{4:>7}} " # serial - width 7 for "Serials"
"{{5:>14}}" # disks - width 14 for "Disks/Capacity","10 / 999.9 MiB"
)
def profile_info_list(self, width=79, verbose=False):
"""Get a list describing available configuration profiles.
Args:
width (int): Line length to wrap to if possible
verbose (bool): if True, generate multiple lines per profile
Returns:
tuple: (header, list)
"""
str_list = []
default_profile_id = self.default_config_profile
profile_ids = self.config_profiles
if not profile_ids:
profile_ids = [None]
prof_w = max(len("Configuration Profiles: "),
2 + max([(len(str(pid))) for pid in profile_ids]),
2 + len(str(default_profile_id) + " (default)"))
# Profile information
template = self.PROFILE_INFO_TEMPLATE.format(prof_w)
header = template.format("Configuration Profiles:", "CPUs", "Memory",
"NICs", "Serials", "Disks/Capacity")
header += "\n" + template.format("", "----", "---------", "----",
"-------", "--------------")
if verbose:
wrapper = textwrap.TextWrapper(width=width,
initial_indent=' ',
subsequent_indent=' ' * 21)
index = 0
for profile_id in profile_ids:
cpus = 0
cpu_item = self.hardware.find_item('cpu', profile=profile_id)
if cpu_item:
cpus = cpu_item.get_value(self.VIRTUAL_QUANTITY,
[profile_id])
mem_bytes = 0
ram_item = self.hardware.find_item('memory', profile=profile_id)
if ram_item:
mem_bytes = programmatic_bytes_to_int(
ram_item.get_value(self.VIRTUAL_QUANTITY, [profile_id]),
ram_item.get_value(self.ALLOCATION_UNITS, [profile_id]))
nics = self.hardware.get_item_count('ethernet', profile_id)
serials = self.hardware.get_item_count('serial', profile_id)
disk_count = self.hardware.get_item_count('harddisk',
profile_id)
disks_size = 0
if self.disk_section is not None:
for disk in self.disk_section.findall(self.DISK):
disks_size += self.get_capacity_from_disk(disk)
profile_str = " " + str(profile_id)
if profile_id == default_profile_id:
profile_str += " (default)"
str_list.append(template.format(
profile_str,
cpus,
pretty_bytes(mem_bytes),
nics,
serials,
"{0:2} / {1:>9}".format(disk_count,
pretty_bytes(disks_size))))
if profile_id is not None and verbose:
profile = self.find_child(self.deploy_opt_section,
self.CONFIG,
attrib={self.CONFIG_ID: profile_id})
str_list.extend(wrapper.wrap(
'{0:15} "{1}"'.format("Label:",
profile.findtext(self.CFG_LABEL))))
str_list.extend(wrapper.wrap(
'{0:15} "{1}"'.format("Description:",
profile.findtext(self.CFG_DESC))))
index += 1
return (header, str_list)
def profile_info_string(self, width=79, verbosity_option=None):
"""Get a string summarizing available configuration profiles.
Args:
width (int): Line | |
import pytest
import requests
import logging
import json
import time
import copy
import random
import arrow
from faker import Faker
# ----------------------------------- vectors -----------------------------------
class TestHTTPVector:
small_size = 10000
normal_size = 100000
big_size = 50000000
def get_mode(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'system/mode')
return res.json()['reply'].upper()
def drop_table_and_wait(self, table_name, args):
try:
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
requests.delete(base_url + 'tables/' + table_name)
except Exception as e:
logging.getLogger().info(e)
finally:
time.sleep(2)
def drop_table(self, table_name, args):
try:
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
requests.delete(base_url + 'tables/' + table_name)
except Exception as e:
logging.getLogger().info(e)
fa = Faker()
def make_records(self, value_type='FLOAT', count=1000, length=32, min_value=0, max_value=1000):
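"""Build `count` random vectors of dimension `length`: integer components in
[min_value, max_value] when value_type is 'INT', floats otherwise."""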
arr = []
for i in range(count):
vector = []
for j in range(length):
if value_type == 'INT':
vector.append(random.randint(min_value, max_value))
else:
vector.append(random.uniform(min_value, max_value))
arr.append(vector)
return arr
def test_post_vectors_1(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 32,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {
"records": arr
}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert size == len(res.json()['ids'])
self.drop_table(req['table_name'], args)
def test_post_vectors_2(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 32,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim - 2, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert 400 == res.status_code
assert 7 == res.json()['code']
assert "The vector dimension must be equal to the table dimension." == res.json()['message']
self.drop_table(req['table_name'], args)
def test_post_vectors_3(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 32,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim + 2, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
# logging.getLogger().info(res.status_code)
# logging.getLogger().info(res.text)
assert 400 == res.status_code
assert 7 == res.json()['code']
assert "The vector dimension must be equal to the table dimension." == res.json()['message']
self.drop_table(req['table_name'], args)
def test_post_vectors_4(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 32,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_5(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 64,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_6(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 128,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_7(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 256,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_8(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 512,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_9(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 64,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_10(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 128,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_11(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 256,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_12(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 512,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_13(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 512,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors/' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_14(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 64,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors/' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_15(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 128,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors?' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_16(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 32,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('FLOAT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors?' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
def test_post_vectors_17(self, args):
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
req = {
"table_name": self.fa.pystr(),
"dimension": 256,
"index_file_size": 10,
"metric_type": 'L2'
}
res = requests.post(base_url + 'tables', json.dumps(req))
size = self.small_size
dim = req['dimension']
arr = self.make_records('INT', size, dim, 0, size * 50)
req_vectors = {"records": arr}
res = requests.post(base_url + 'tables/%s/vectors#' % req['table_name'], json.dumps(req_vectors))
assert len(res.json()['ids']) == size
assert 201 == res.status_code
self.drop_table(req['table_name'], args)
"""Return Oriented Programming
"""
import hashlib, os, sys, tempfile, re
from .elf import ELF
from .util import packing
from .log import getLogger
log = getLogger(__name__)
class ROP(object):
"""Class which simplifies the generation of ROP-chains.
Example:
.. code-block:: python
elf = ELF('ropasaurusrex')
rop = ROP(elf)
rop.read(0, elf.bss(0x80))
rop.dump()
# ['0x0000: 0x80482fc (read)',
# '0x0004: 0xdeadbeef',
# '0x0008: 0x0',
# '0x000c: 0x80496a8']
str(rop)
# '\\xfc\\x82\\x04\\x08\\xef\\xbe\\xad\\xde\\x00\\x00\\x00\\x00\\xa8\\x96\\x04\\x08'
"""
def __init__(self, elfs, base = None):
"""
Arguments:
elfs(list): List of pwnlib.elf.ELF objects for mining
"""
import ropgadget
# Permit singular ROP(elf) vs ROP([elf])
if isinstance(elfs, ELF):
elfs = [elfs]
elif isinstance(elfs, (str, unicode)):
elfs = [ELF(elfs)]
self.elfs = elfs
self._chain = []
self.base = base
self.align = max(e.elfclass for e in elfs)/8
self.migrated = False
self.__load()
def resolve(self, resolvable):
"""Resolves a symbol to an address
Arguments:
resolvable(str,int): Thing to convert into an address
Returns:
int containing address of 'resolvable', or None
"""
if isinstance(resolvable, str):
for elf in self.elfs:
if resolvable in elf.symbols:
return elf.symbols[resolvable]
if isinstance(resolvable, (int,long)):
return resolvable
return None
def unresolve(self, value):
"""Inverts 'resolve'. Given an address, it attempts to find a symbol
for it in the loaded ELF files. If none is found, it searches all
known gadgets, and returns the disassembly
Arguments:
value(int): Address to look up
Returns:
String containing the symbol name for the address, disassembly for a gadget
(if there's one at that address), or an empty string.
"""
for elf in self.elfs:
for name, addr in elf.symbols.items():
if addr == value:
return name
if value in self.gadgets:
return '; '.join(self.gadgets[value]['insns'])
return ''
def _output_struct(self, value, output):
next_index = len(output)
if isinstance(value, (int, long)):
return value
elif isinstance(value, (unicode, str)):
if isinstance(value, unicode):
value = value.encode('utf8')
while True:
value += '\x00'
if len(value) % self.align == 0:
break
output.append([value])
return (next_index,)
elif isinstance(value, (tuple, list)):
l = []
output.append(l)
for v in value:
l.append(self._output_struct(v, output))
return (next_index,)
else:
log.error("ROP: Cannot flatten value %r" % value)
def _build_x86(self):
# Stage 1:
# Convert every call in self._chain from a (addr, args) tuple
# into a (addr, pivot, args, pad) tuple.
#
# Stage 2:
# Micro-optimizations for the last call in the chain.
#
# Stage 3:
# Convert into a [[str/ints/refs]], where
# refs are references to one of the first lists and will be turned
# into pointers outside this function. Refs are represented as
# length-1 tuples.
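# Illustrative sketch (hypothetical addresses, not taken from a real binary): a call such as
# rop.read(0, 0x1000), cleaned up by a "pop ebx ; pop esi ; ret" gadget found at 0x8048123,
# would come out of Stage 1 roughly as:
#   [read_addr, [0x8048123], [0, 0x1000], pad]
# where pad is the number of filler words needed to cover the pivot's stack adjustment.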
if not self._chain:
return []
# Stage 1
chain = []
for addr, args in self._chain:
if not args:
chain.append([addr, [], [], 0])
else:
need = (1+len(args)) * self.align
best_pivot = None
best_size = None
for size, pivot in sorted(self.pivots.items()):
if size >= need:
best_pivot = pivot
best_size = size
break
if best_pivot == None:
log.error("Could not find gadget to clean up stack for call %r %r" % (addr, args))
chain.append([addr, [best_pivot], args, best_size/4 - len(args) - 1])
# Stage 2
# If the last call has arguments, there is no need
# to fix up the stack up for those arguments
if chain[-1][2]:
chain[-1][1] = [0xdeadbeef]
chain[-1][3] = 0
# If the last call does not have any arguments, there is no
# need to fix up the stack for the second-to-last call.
# We can put the last call as the pivot address for
# the second-to-last call.
if len(chain) > 1 and not chain[-1][2] and chain[-2][2]:
# This optimization does not work if a raw string is on the stack
if not isinstance(chain[-1][0], (str, unicode)):
chain[-2][1] = [chain[-1][0]]
chain[-2][3] = 0
chain.pop()
# Stage 3
outrop = []
output = [outrop]
for addr, pivot, args, pad in chain:
outrop.append(addr)
outrop.extend(pivot)
for arg in args:
outrop.append(self._output_struct(arg, output))
for _ in range(pad):
outrop.append('$$$$')
return output
def build(self, base = None):
"""Build the ROP chain into a list (addr, int/string, bool), where the
last value is True iff the value was an internal reference.
It is guaranteed that the individual parts are next to each other.
If there is no base available, then the returned addresses are indexed from 0.
Arguments:
base(int): The base address to build the rop-chain from. Defaults to
self.base.
"""
if base == None:
base = self.base
# Use the architecture specific builder to get a [[str/ints/refs]]
meth = '_build_' + self.elfs[0].get_machine_arch()
if not hasattr(self, meth):
log.error("Cannot build rop for architecture %r" % self.elfs[0].get_machine_arch())
rop = getattr(self, meth)()
# Stage 1
# Generate a dictionary {ref_id: addr}.
addrs = {}
if base != None:
addr = base
for i, l in enumerate(rop):
addrs[i] = addr
for v in l:
if isinstance(v, (int, long, tuple)):
addr += self.align
else:
addr += len(v)
# Stage 2:
# Convert into [(addr, int/string, bool)]
addr = base or 0
out = []
for l in rop:
for v in l:
if isinstance(v, (int, long)):
out.append((addr, v, False))
addr += self.align
elif isinstance(v, str):
out.append((addr, v, False))
addr += len(v)
elif isinstance(v, tuple):
if v[0] in addrs:
out.append((addr, addrs[v[0]], True))
addr += self.align
elif base != None:
log.error("ROP: References unknown structure index")
else:
log.error("ROP: Cannot use structures without a base address")
else:
log.error("ROP: Unexpected value: %r" % v)
return out
def chain(self):
"""Build the ROP chain
Returns:
str containing raw ROP bytes
"""
return packing.flat(
[value for addr, value, was_ref in self.build()],
word_size = 8*self.align
)
def dump(self):
"""Dump the ROP chain in an easy-to-read manner"""
result = []
rop = self.build(self.base or 0)
addrs = [addr for addr, value, was_ref in rop]
for addr, value, was_ref in rop:
if isinstance(value, str):
line = "0x%04x: %16r" % (addr, value.rstrip('\x00'))
elif isinstance(value, (int, long)):
if was_ref:
line = "0x%04x: %#16x (%+d)" % (
addr,
value,
value - addr
)
else:
ref = self.unresolve(value)
line = "0x%04x: %#16x%s" % (
addr,
value,
(' (%s)' % ref) if ref else ''
)
else:
log.error("ROP: ROP.build returned an unexpected value %r" % value)
result.append(line)
return '\n'.join(result)
def call(self, resolvable, arguments=()):
"""Add a call to the ROP chain
Arguments:
resolvable(str,int): Value which can be looked up via 'resolve',
or is already an integer.
arguments(list): List of arguments which can be passed to pack().
Alternately, if a base address is set, arbitrarily nested
structures of strings or integers can be provided.
"""
if self.migrated:
log.error("Cannot append to a migrated chain")
addr = self.resolve(resolvable)
if addr is None:
log.error("Could not resolve %r" % resolvable)
self._chain.append((addr, arguments))
def raw(self, value):
"""Adds a raw integer or string to the ROP chain.
If your architecture requires aligned values, then make
sure that any given string is aligned!
Arguments:
data(int/str): The raw value to put onto the rop chain.
"""
if self.migrated:
log.error("Cannot append to a migrated chain")
self._chain.append((value, ()))
def migrate(self, next_base):
"""Explicitly set $sp, by using a ``leave; ret`` gadget"""
if isinstance(next_base, ROP):
next_base = self.base
pop_sp = self.rsp or self.esp
pop_bp = self.rbp or self.ebp
leave = self.leave
if pop_sp and len(pop_sp[1]['regs']) == 1:
self.raw(pop_sp[0])
self.raw(next_base)
elif pop_bp and leave and len(pop_bp[1]['regs']) == 1:
self.raw(pop_bp[0])
self.raw(next_base-4)
self.raw(leave[0])
else:
log.error("Cannot find the gadgets to migrate")
self.migrated = True
def __str__(self):
"""Returns: Raw bytes of the ROP chain"""
return self.chain()
def __get_cachefile_name(self, elf):
basename = os.path.basename(elf.file.name)
md5sum = hashlib.md5(elf.get_data()).hexdigest()
filename = "%s-%s-%#x" % (basename, md5sum, elf.address)
cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-rop-cache')
if not os.path.exists(cachedir):
os.mkdir(cachedir)
return os.path.join(cachedir, filename)
def __cache_load(self, elf):
filename = self.__get_cachefile_name(elf)
if os.path.exists(filename):
log.info_once("Loaded cached gadgets for %r @ %#x" % (elf.file.name, elf.address))
return eval(file(filename).read())
def __cache_save(self, elf, data):
file(self.__get_cachefile_name(elf),'w+').write(repr(data))
def __load(self):
"""Load all ROP gadgets for the selected ELF files"""
#
# We accept only instructions that look like these.
#
# - leave
# - pop reg
# - add $sp, value
# - ret
#
# Currently, ROPgadget does not detect multi-byte "C2" ret.
# https://github.com/JonathanSalwan/ROPgadget/issues/53
#
pop = re.compile(r'^pop (.{3})')
add = re.compile(r'^add .sp, (\S+)$')
ret = re.compile(r'^ret$')
leave = re.compile(r'^leave$')
#
# Validation routine
#
# >>> valid('pop eax')
# | |
#!/usr/bin/env python
# Copyright 2021 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Migration which migrates data for existing objects in the database which utilize
EscapedDictField or EscapedDynamicField and have been updated to use the new JsonDictField.
Migration step is idempotent and can be retried on failures / partial runs.
Keep in mind that running this migration script is optional and it may take a long time if you have
a lot of very large objects in the database (aka executions) - reading a lot of data from the
database using the old field types is slow and CPU intensive.
The new field type is automatically used for all new objects when upgrading to v3.5, so migration is
optional because in most cases users are viewing recent / new executions and not old ones which may
still utilize the old field type, which is slow to read / write.
Right now the script utilizes no concurrency and performs migration one object at a time. That's done
for simplicity reasons and also to avoid massive CPU usage spikes when running this script with
large concurrency on large objects.
Keep in mind that only "completed" objects are processed - this means Executions in "final" states
(succeeded, failed, timeout, etc.).
We determine if an object should be migrated using a mongodb $type query (for execution objects we
could also determine that based on the presence of the result_size field).
"""
import sys
import time
import datetime
import traceback
from oslo_config import cfg
from st2common import config
from st2common.service_setup import db_setup
from st2common.service_setup import db_teardown
from st2common.util import isotime
from st2common.models.db.execution import ActionExecutionDB
from st2common.models.db.workflow import WorkflowExecutionDB
from st2common.models.db.workflow import TaskExecutionDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.persistence.execution import ActionExecution
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.workflow import WorkflowExecution
from st2common.persistence.workflow import TaskExecution
from st2common.persistence.trigger import TriggerInstance
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.triggers import TRIGGER_INSTANCE_COMPLETED_STATES
# NOTE: To avoid unnecessary mongoengine object churn when retrieving only object ids (aka to avoid
# instantiating model class with a single field), we use raw pymongo value which is a dict with a
# single value
def migrate_executions(start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
"""
Perform migrations for execution related objects (ActionExecutionDB, LiveActionDB).
"""
print("Migrating execution objects")
# NOTE: We first only retrieve the IDs because there could be a lot of objects in the database
# and this could result in massive ram use. Technically, mongoengine loads querysets lazily,
# but this is not always the case so it's better to first retrieve all the IDs and then retrieve
# objects one by one.
# Keep in mind we need to use ModelClass.objects and not PersistenceClass.query() so .only()
# works correctly - with PersistenceClass.query().only() all the fields will still be retrieved.
# 1. Migrate ActionExecutionDB objects
result = (
ActionExecutionDB.objects(
__raw__={
"result": {
"$not": {
"$type": "binData",
},
},
"status": {
"$in": LIVEACTION_COMPLETED_STATES,
},
},
start_timestamp__gte=start_dt,
start_timestamp__lte=end_dt,
)
.only("id")
.as_pymongo()
)
execution_ids = set([str(item["_id"]) for item in result])
objects_count = result.count()
if not execution_ids:
print("Found no ActionExecutionDB objects to migrate.")
print("")
return None
print("Will migrate %s ActionExecutionDB objects" % (objects_count))
print("")
for index, execution_id in enumerate(execution_ids, 1):
try:
execution_db = ActionExecution.get_by_id(execution_id)
except StackStormDBObjectNotFoundError:
print(
"Skipping ActionExecutionDB with id %s which is missing in the database"
% (execution_id)
)
continue
print(
"[%s/%s] Migrating ActionExecutionDB with id %s"
% (index, objects_count, execution_id)
)
# This is a bit of a "hack", but it's the easiest way to tell mongoengine that a specific
# field has been updated and should be saved. If we don't do, nothing will be re-saved on
# .save() call due to mongoengine only trying to save what has changed to make it more
# efficient instead of always re-saving the whole object.
execution_db._mark_as_changed("result")
execution_db._mark_as_changed("result_size")
# We need to explicitly set result_size attribute since Document.save() code path doesn't
# populate it (but other code paths we utilize elsewhere do).
# Technically we could do it on document clean() / validate() method, but we don't want that
# since execution update code in action runner and elsewhere is optimized to make partial
# updates more efficient.
result_size = len(
ActionExecutionDB.result._serialize_field_value(execution_db.result or {})
)
execution_db.result_size = result_size
# NOTE: If you want to view changed fields, you can access execution_db._changed_fields
execution_db.save()
print("ActionExecutionDB with id %s has been migrated" % (execution_db.id))
# Migrate corresponding LiveAction object
liveaction = execution_db.liveaction or {}
liveaction_id = liveaction.get("id", None)
if not liveaction_id:
continue
try:
liveaction_db = LiveAction.get_by_id(liveaction_id)
except StackStormDBObjectNotFoundError:
# If liveaction for some reason doesn't exist (would likely represent corrupted data) we
# simply ignore that error since it's not fatal.
print(
"Skipping LiveActionDB with id %s which is missing in the database"
% (liveaction_id)
)
continue
liveaction_db._mark_as_changed("result")
liveaction_db.save()
print("Related LiveActionDB with id %s has been migrated" % (liveaction_db.id))
print("")
def migrate_workflow_objects(
start_dt: datetime.datetime, end_dt: datetime.datetime
) -> None:
print("Migrating workflow objects")
# 1. Migrate WorkflowExecutionDB
result = (
WorkflowExecutionDB.objects(
__raw__={
"output": {
"$not": {
"$type": "binData",
},
},
"status": {
"$in": LIVEACTION_COMPLETED_STATES,
},
},
start_timestamp__gte=start_dt,
start_timestamp__lte=end_dt,
)
.only("id")
.as_pymongo()
)
workflow_execution_ids = [str(item["_id"]) for item in result]
objects_count = result.count()
if not workflow_execution_ids:
print("Found no WorkflowExecutionDB objects to migrate.")
print("")
else:
print("Will migrate %s WorkflowExecutionDB objects" % (objects_count))
print("")
for index, workflow_execution_id in enumerate(workflow_execution_ids, 1):
try:
workflow_execution_db = WorkflowExecution.get_by_id(workflow_execution_id)
except StackStormDBObjectNotFoundError:
print(
"Skipping WorkflowExecutionDB with id %s which is missing in the database"
% (workflow_execution_id)
)
continue
print(
"[%s/%s] Migrating WorkflowExecutionDB with id %s"
% (index, objects_count, workflow_execution_id)
)
workflow_execution_db._mark_as_changed("input")
workflow_execution_db._mark_as_changed("context")
workflow_execution_db._mark_as_changed("state")
workflow_execution_db._mark_as_changed("output")
workflow_execution_db.save()
print(
"WorkflowExecutionDB with id %s has been migrated"
% (workflow_execution_db.id)
)
print("")
# 2. Migrate TaskExecutionDB
result = (
TaskExecutionDB.objects(
__raw__={
"result": {
"$not": {
"$type": "binData",
},
},
"status": {
"$in": LIVEACTION_COMPLETED_STATES,
},
},
start_timestamp__gte=start_dt,
start_timestamp__lte=end_dt,
)
.only("id")
.as_pymongo()
)
task_execution_ids = [str(item["_id"]) for item in result]
objects_count = result.count()
if not task_execution_ids:
print("Found no TaskExecutionDB objects to migrate.")
print("")
else:
print("Will migrate %s TaskExecutionDB objects" % (objects_count))
print("")
for index, task_execution_id in enumerate(task_execution_ids, 1):
try:
task_execution_db = TaskExecution.get_by_id(task_execution_id)
except StackStormDBObjectNotFoundError:
print(
"Skipping TaskExecutionDB with id %s which is missing in the database"
% (task_execution_id)
)
continue
print(
"[%s/%s] Migrating TaskExecutionDB with id %s"
% (index, objects_count, task_execution_id)
)
task_execution_db._mark_as_changed("task_spec")
task_execution_db._mark_as_changed("context")
task_execution_db._mark_as_changed("result")
task_execution_db.save()
print("TaskExecutionDB with id %s has been migrated" % (task_execution_db.id))
print("")
def migrate_triggers(start_dt: datetime.datetime, end_dt: datetime.datetime) -> None:
print("Migrating trigger objects")
result = (
TriggerInstanceDB.objects(
__raw__={
"payload": {
"$not": {
"$type": "binData",
},
},
"status": {
"$in": TRIGGER_INSTANCE_COMPLETED_STATES,
},
},
occurrence_time__gte=start_dt,
occurrence_time__lte=end_dt,
)
.only("id")
.as_pymongo()
)
trigger_instance_ids = [str(item["_id"]) for item in result]
objects_count = result.count()
if not trigger_instance_ids:
print("Found no TriggerInstanceDB objects to migrate.")
print("")
return None
print("Will migrate %s TriggerInstanceDB objects" % (objects_count))
print("")
for index, trigger_instance_id in enumerate(trigger_instance_ids, 1):
try:
trigger_instance_db = TriggerInstance.get_by_id(trigger_instance_id)
except StackStormDBObjectNotFoundError:
print(
"Skipping TriggerInstanceDB with id %s which is missing in the database"
% (trigger_instance_id)
)
continue
print(
"[%s/%s] Migrating TriggerInstanceDB with id %s"
% (index, objects_count, trigger_instance_id)
)
trigger_instance_db._mark_as_changed("payload")
trigger_instance_db.save()
print(
"TriggerInstanceDB with id %s has been migrated" % (trigger_instance_db.id)
)
print("")
def migrate_objects(
start_dt: datetime.datetime, end_dt: datetime.datetime, display_prompt: bool = True
) -> None:
start_dt_str = start_dt.strftime("%Y-%m-%d %H:%M:%S")
end_dt_str = end_dt.strftime("%Y-%m-%d %H:%M:%S")
print("StackStorm v3.5 database field data migration script\n")
if display_prompt:
input(
"Will migrate objects with creation date between %s UTC and %s UTC.\n\n"
"You are strongly recommended to create database backup before proceeding.\n\n"
"Depending on the number of the objects in the database, "
"migration may take multiple hours or more. You are recommended to start the "
"script in a screen session, tmux or similar. \n\n"
"To proceed with the migration, press enter and to cancel it, press CTRL+C.\n"
% (start_dt_str, end_dt_str)
)
print("")
print(
"Migrating affected database objects between | |
# 2021-03-23 / last updated on 2021-04-08
# This code was made for use in the Fu lab
# by <NAME>
import numpy as np
import pandas as pd
from ..InstrumentHandler import GPIBInstrument
from ..units.frequency import frequency_units_dict
from ..units.UnitClass import UnitClass
class DataGenerator2020AInstrument(GPIBInstrument):
block_define_cmd = 'DATA:BLOCK:DEFINE'
memory_cmd = 'DATA:MSIZe'
mode_state_cmd = 'MODE:STATE'
oscillator_internal_frequency_cmd = 'SOURCE:OSCILLATOR:INTERNAL:FREQUENCY'
oscillator_internal_plllock_cmd = 'SOURCE:OSCILLATOR:INTERNAL:PLLlock'
oscillator_source_cmd = 'SOURCE:OSCILLATOR:SOURCE'
pattern_cmd = 'DATA:PATTern:BIT'
running_cmd = 'RUNNING'
sequence_add_cmd = 'DATA:SEQUENCE:ADD'
sequence_define_cmd = 'DATA:SEQUENCE:DEFINE'
subsequence_define_cmd = 'DATA:SUBSEQUENCE:DEFINE'
start_cmd = 'STARt'
stop_cmd = 'STOP'
# Hardware limited options
allowed_mode_states = ['REPEAT', 'SINGLE', 'STEP', 'ENHANCED']
allowed_oscillator_sources = ['INTERNAL', 'EXTERNAL']
allowed_oscillator_frequency_input_units = ['HZ', 'KHZ', 'MHZ']
oscillator_internal_frequency_limits_in_hz = [1e-2, 2e8]
bit_position_limits = [0, 35]
memory_length_limits = [1, 2**16]
start_address_limits = [0, 2**16 - 1]
sequence_repetition_limits = [1, 2**16]
# for all reads except DATA:PATTERN:BIT, the output of the datagenerator of a query command '[CMD]?' is:
# output_string = ':[CMD] [OUTPUT]\n'
# Hence, to convert the response to a number or bool, we take the substring: output_string[len([CMD])+2:-1]
def __init__(self, device_name='', memory=None, oscillator_internal_frequency_in_hz=None, timeout=20000, verbose=1,
initialize_at_definition=True):
# initializing the class as part of the daq superclass (which is part of the instrument super class)
super().__init__(device_name, verbose=verbose, initialize_at_definition=initialize_at_definition)
# set the operation timeout in microseconds of the data generator. If the generator takes longer than the
# timeout, it will throw an error.
self.instrument.timeout = timeout
# initializing memory and oscillator internal frequency. They can be kept the same as before if left blank,
# and changed later if needed. We make sure to change them after the DG stops.
self.was_running_on_initialization = self.is_running()
if initialize_at_definition and not self.was_running_on_initialization:
if oscillator_internal_frequency_in_hz is not None:
self.set_oscillator_internal_frequency(oscillator_internal_frequency_in_hz, units='Hz')
if memory is not None:
self.set_memory(memory)
# if it was running on initialization (True), the above if will not be completed, hence we need to set
# the parameters in the future (True)
self.need_to_set_initiazation_parameters = self.was_running_on_initialization
if self.need_to_set_initiazation_parameters:
self.stored_initialization_frequency_hz = oscillator_internal_frequency_in_hz
self.stored_initialization_memory_size = memory
# self.simple_write(self.clear_command)
@staticmethod
def get_cmd_response_string(read_line, cmd):
return read_line[len(cmd) + 2:-1]
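# Illustrative example, assuming the response format described above:
#   get_cmd_response_string(':MODE:STATE REPEAT\n', 'MODE:STATE') -> 'REPEAT'
# (len(cmd) + 2 skips the leading ':', the echoed command and the space; [:-1] drops the '\n')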
@staticmethod
def pattern_array_to_string(pattern_array):
# Takes a numpy.ndarray or list and converts it to a string of 0s and 1s by converting each
# element to str and joining them (the commented-out lines below are earlier attempts that
# stringified the whole array and stripped the brackets and spaces).
# np.array2string(pattern_array, , precision=0, separator='', suppress_small=True)
# return ''.join(str(pattern_array).split())[1:-1]
pat_str = [str(element) for element in pattern_array]
return ''.join(pat_str)
@staticmethod
def pattern_string_to_array(pattern_string):
# take the character and convert it to int for all characters in the pattern string
return np.array([int(character) for character in pattern_string])
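# Illustrative round trip (patterns are assumed to contain only 0s and 1s):
#   pattern_array_to_string(np.array([0, 1, 1, 0])) -> '0110'
#   pattern_string_to_array('0110') -> array([0, 1, 1, 0])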
@staticmethod
def join_pattern_arrays(pattern_list):
if not (isinstance(pattern_list, np.ndarray) or isinstance(pattern_list, list)):
raise TypeError('Pattern_list is not numpy.ndarray or list type')
if not (np.all([isinstance(pattern, np.ndarray) for pattern in pattern_list]) or
np.all([isinstance(pattern, list) for pattern in pattern_list])):
raise TypeError('One or more patterns in pattern_list are of different type AND/OR'
' not numpy.ndarray or list type.')
# take an empty numpy.ndarray and append each pattern given in the pattern list, with index priority
# (1st pattern is first in the final pattern, 2nd pattern is second in the final pattern etc.)
final_pattern = np.array(np.concatenate(pattern_list, axis=0))
return final_pattern
@staticmethod
def repeat_pattern_array(pattern_array, repetitions):
if not (isinstance(pattern_array, np.ndarray) or isinstance(pattern_array, list)):
raise TypeError('Pattern_array is not numpy.ndarray or list type')
# similar to join_pattern_arrays, takes a zero numpy.ndarray & adds to it the given pattern 'repetitions' times
final_pattern_array = np.array([])
for j in range(repetitions):
final_pattern_array = np.append(final_pattern_array, pattern_array)
return final_pattern_array
def generate_OFF_pattern_array(self, length=None):
# returns an array of zeros, of the specified length
if length is None:
length = self.get_memory()
pattern = np.zeros(length, dtype=int)
return pattern
def generate_ON_pattern_array(self, length=None):
# returns an array of ones, of the specified length
if length is None:
length = self.get_memory()
pattern = np.ones(length, dtype=int)
return pattern
def generate_OFF_ON_pattern_array(self, size=1, length=None):
# returns an array of zeros-ones, of the specified length. Step size is determined from size
if length is None:
length = self.get_memory()
pattern = np.array([int(np.ceil((i + 1 + size)/size) % 2) for i in range(length)])
return pattern
def generate_ON_OFF_pattern_array(self, size=1, length=None):
# returns an array of ones-zeros, of the specified length. Step size is determined from size
if length is None:
length = self.get_memory()
pattern = np.array([int(np.ceil((i + 1)/size) % 2) for i in range(length)])
return pattern
def generate_complex_pattern_array(self, dataframe):
# The first column 'patterns' of the dataframe holds the pattern arrays as numpy.ndarray or str
# The second column 'repetitions' is the number of times the pattern at the same index is repeated
# The values on the first index go first, then the ones on the second index, etc.
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be given pandas.DataFrame type.')
if 'patterns' not in dataframe.keys() or 'repetitions' not in dataframe.keys():
raise ValueError("One or more required keys were not found.\n"
"Required keys: 1. \'patterns\' and 2. \'repetitions\'.")
pattern_list = []
for i, pattern in enumerate(dataframe['patterns']):
if isinstance(pattern, str):
pattern = self.pattern_string_to_array(pattern)
elif not isinstance(pattern, np.ndarray):
raise ValueError('Pattern on dataframe index {:.0f} was neither a string nor a numpy.ndarray.'.format(i))
pattern_list.append(self.repeat_pattern_array(pattern, dataframe['repetitions'][i]))
final_pattern = self.join_pattern_arrays(pattern_list)
return final_pattern
def define_block(self, names, starting_positions):
if isinstance(names, str):
names = [names]
if not np.all([isinstance(name, str) for name in names]):
raise TypeError('Data generator block names must be a string or a list of strings.')
if not np.all([len(name) <= 8 for name in names]):
raise ValueError('Data generator block names must be at most 8 characters long.')
if isinstance(starting_positions, int):
starting_positions = [starting_positions]
if not np.all([isinstance(sp, int) for sp in starting_positions]):
raise TypeError('Data generator block starting positions must be a integer or a list of integers.')
if not np.all(np.diff(starting_positions) > 0):
raise ValueError('Data generator block starting positions must be different from each other and increase'
' in value')
if len(names) != len(starting_positions):
raise ValueError('Data generator block names and start_positions list lengths need to be equal.')
# convert numbers to strings
starting_positions = [str(r) for r in starting_positions]
# construct a string of the type [block1start_pos],[block1name]\n[block2start_pos],[block2name]...
block_definition_string = '\n'.join([starting_positions[i] + ',' + names[i].upper() for i in range(len(names))])
definition_length = str(len(block_definition_string))
definition_length_digits = str(len(definition_length))
command = '\n' + self.block_define_cmd + ' #' + definition_length_digits + definition_length +\
block_definition_string + '\n' + '\n'
return self.simple_write(command)
def set_memory(self, memory):
if isinstance(memory, int):
memory = str(memory)
else:
raise TypeError('Data generator memory must be an integer.')
return self.simple_write(self.memory_cmd + ' ' + memory)
def get_memory(self):
self.simple_write(self.memory_cmd + '?')
read_line = self.simple_read()
value = self.get_cmd_response_string(read_line, self.memory_cmd)
return int(value)
def set_mode_state(self, mode_state):
if mode_state.upper() not in self.allowed_mode_states:
raise ValueError('Data generator mode state can only be one of: ' + str(self.allowed_mode_states))
return self.simple_write(self.mode_state_cmd + ' ' + mode_state.upper())
def get_mode_state(self):
self.simple_write(self.mode_state_cmd + '?')
read_line = self.simple_read()
value = self.get_cmd_response_string(read_line, self.mode_state_cmd)
return value
def set_oscillator_internal_frequency(self, freq, units='MHz'):
if not (isinstance(freq, float) or isinstance(freq, int)):
raise TypeError('Data generator oscillator internal frequency must be an integer or a float.')
if units.upper() not in self.allowed_oscillator_frequency_input_units:
raise ValueError('Data Generator internal frequency input units can only be Hz, kHz, or MHz.')
# print(UnitClass(freq, units))
if not (self.oscillator_internal_frequency_limits_in_hz[0] <= UnitClass(freq, units).Hz <=
self.oscillator_internal_frequency_limits_in_hz[1]):
raise ValueError('Data Generator internal frequency input should be between 1e-2 and 2e8 Hz')
freq = "{:.1f}".format(freq)
return self.simple_write(self.oscillator_internal_frequency_cmd + ' ' + freq + units.upper())
def get_oscillator_internal_frequency(self, units='MHz'):
if units not in frequency_units_dict.keys():
raise ValueError('Data generator oscillator internal frequency units requested were not recognized.')
self.simple_write(self.oscillator_internal_frequency_cmd + '?')
read_line = self.simple_read()
value = self.get_cmd_response_string(read_line, self.oscillator_internal_frequency_cmd)
return UnitClass(float(value)/frequency_units_dict['Hz']*frequency_units_dict[units], units)
def set_oscillator_internal_plllock(self, value):
if isinstance(value, bool):
value = str(int(value))
else:
raise TypeError('Data generator oscillator internal ppllock value must be a boolean.')
return self.simple_write(self.oscillator_internal_plllock_cmd + ' ' + value)
def get_oscillator_internal_plllock(self):
self.simple_write(self.oscillator_internal_plllock_cmd + '?')
read_line = self.simple_read()
value = self.get_cmd_response_string(read_line, self.oscillator_internal_plllock_cmd)
return bool(int(value))
def set_oscillator_source(self, oscillator_source):
if oscillator_source.upper() not in self.allowed_oscillator_sources:
raise ValueError('Data generator mode state can only be one of: ' + str(self.allowed_oscillator_sources))
return self.simple_write(self.oscillator_source_cmd + ' ' + oscillator_source.upper())
def get_oscillator_source(self):
self.simple_write(self.oscillator_source_cmd + '?')
read_line = self.simple_read()
value = self.get_cmd_response_string(read_line, self.oscillator_source_cmd)
return value
def set_pattern(self, bit_position, pattern_array, start_address=0, length=None):
# you can set a list/numpy.ndarray of bit_positions with the same pattern at once
if isinstance(bit_position, int):
bit_position = [bit_position]
| |
#!/usr/bin/env python
"""API handlers for accessing hunts."""
import functools
import itertools
import logging
import operator
import re
from grr import config
from grr.gui import api_call_handler_base
from grr.gui import api_call_handler_utils
from grr.gui.api_plugins import client as api_client
from grr.gui.api_plugins import flow as api_flow
from grr.gui.api_plugins import output_plugin as api_output_plugin
from grr.gui.api_plugins import vfs as api_vfs
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import hunts as rdf_hunts
from grr.lib.rdfvalues import stats as rdf_stats
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto.api import hunt_pb2
from grr.server import aff4
from grr.server import events
from grr.server import flow
from grr.server import foreman
from grr.server import instant_output_plugin
from grr.server import output_plugin
from grr.server.aff4_objects import users as aff4_users
from grr.server.flows.general import export
from grr.server.hunts import implementation
from grr.server.hunts import standard
HUNTS_ROOT_PATH = rdfvalue.RDFURN("aff4:/hunts")
class HuntNotFoundError(api_call_handler_base.ResourceNotFoundError):
"""Raised when a hunt could not be found."""
class HuntFileNotFoundError(api_call_handler_base.ResourceNotFoundError):
"""Raised when a hunt file could not be found."""
class Error(Exception):
pass
class InvalidHuntStateError(Error):
pass
class HuntNotStartableError(Error):
pass
class HuntNotStoppableError(Error):
pass
class HuntNotModifiableError(Error):
pass
class HuntNotDeletableError(Error):
pass
class ApiHuntId(rdfvalue.RDFString):
"""Class encapsulating hunt ids."""
def __init__(self, initializer=None, age=None):
super(ApiHuntId, self).__init__(initializer=initializer, age=age)
# TODO(user): move this to a separate validation method when
# common RDFValues validation approach is implemented.
if self._value:
try:
rdfvalue.SessionID.ValidateID(self._value)
except ValueError as e:
raise ValueError("Invalid hunt id: %s (%s)" %
(utils.SmartStr(self._value), e))
def ToURN(self):
if not self._value:
raise ValueError("can't call ToURN() on an empty hunt id.")
return HUNTS_ROOT_PATH.Add(self._value)
class ApiHuntReference(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiHuntReference
rdf_deps = [
ApiHuntId,
]
def FromHuntReference(self, reference):
self.hunt_id = reference.hunt_id
return self
class ApiFlowLikeObjectReference(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiFlowLikeObjectReference
rdf_deps = [
ApiHuntReference,
api_flow.ApiFlowReference,
]
def FromFlowLikeObjectReference(self, reference):
self.object_type = reference.object_type
if reference.object_type == "HUNT_REFERENCE":
self.hunt_reference = ApiHuntReference().FromHuntReference(
reference.hunt_reference)
elif reference.object_type == "FLOW_REFERENCE":
self.flow_reference = api_flow.ApiFlowReference().FromFlowReference(
reference.flow_reference)
return self
class ApiHunt(rdf_structs.RDFProtoStruct):
"""ApiHunt is used when rendering responses.
ApiHunt is meant to be more lightweight than automatically generated AFF4
representation. It's also meant to contain only the information needed by
the UI and to not expose implementation details.
"""
protobuf = hunt_pb2.ApiHunt
rdf_deps = [
ApiHuntId,
ApiFlowLikeObjectReference,
foreman.ForemanClientRuleSet,
rdf_hunts.HuntRunnerArgs,
rdfvalue.RDFDatetime,
rdfvalue.SessionID,
]
def GetFlowArgsClass(self):
if self.flow_name:
flow_cls = flow.GRRFlow.classes.get(self.flow_name)
if flow_cls is None:
raise ValueError(
"Flow %s not known by this implementation." % self.flow_name)
# The required protobuf for this class is in args_type.
return flow_cls.args_type
def InitFromAff4Object(self, hunt, with_full_summary=False):
try:
runner = hunt.GetRunner()
context = runner.context
self.urn = hunt.urn
self.hunt_id = hunt.urn.Basename()
self.name = hunt.runner_args.hunt_name
self.state = str(hunt.Get(hunt.Schema.STATE))
self.crash_limit = hunt.runner_args.crash_limit
self.client_limit = hunt.runner_args.client_limit
self.client_rate = hunt.runner_args.client_rate
self.created = context.create_time
self.expires = context.expires
self.creator = context.creator
self.description = hunt.runner_args.description
self.is_robot = context.creator == "GRRWorker"
self.results_count = context.results_count
self.clients_with_results_count = context.clients_with_results_count
self.clients_queued_count = context.clients_queued_count
if hunt.runner_args.original_object.object_type != "UNKNOWN":
ref = ApiFlowLikeObjectReference()
self.original_object = ref.FromFlowLikeObjectReference(
hunt.runner_args.original_object)
hunt_stats = context.usage_stats
self.total_cpu_usage = hunt_stats.user_cpu_stats.sum
self.total_net_usage = hunt_stats.network_bytes_sent_stats.sum
if with_full_summary:
# This is an expensive call. Avoid it if not needed.
all_clients_count, completed_clients_count, _ = hunt.GetClientsCounts()
self.all_clients_count = all_clients_count
self.completed_clients_count = completed_clients_count
self.remaining_clients_count = (
all_clients_count - completed_clients_count)
self.hunt_runner_args = hunt.runner_args
self.client_rule_set = runner.runner_args.client_rule_set
# We assume we deal here with a GenericHunt and hence hunt.args is a
# GenericHuntArgs instance. But if we have another kind of hunt
# (VariableGenericHunt is the only other kind of hunt at the
# moment), then we shouldn't raise.
if hunt.args.HasField("flow_runner_args"):
self.flow_name = hunt.args.flow_runner_args.flow_name
if self.flow_name and hunt.args.HasField("flow_args"):
self.flow_args = hunt.args.flow_args
except Exception as e: # pylint: disable=broad-except
self.internal_error = "Error while opening hunt: %s" % str(e)
return self
class ApiHuntResult(rdf_structs.RDFProtoStruct):
"""API hunt results object."""
protobuf = hunt_pb2.ApiHuntResult
rdf_deps = [
api_client.ApiClientId,
rdfvalue.RDFDatetime,
]
def GetPayloadClass(self):
return rdfvalue.RDFValue.classes[self.payload_type]
def InitFromGrrMessage(self, message):
if message.source:
self.client_id = message.source.Basename()
self.payload_type = message.payload.__class__.__name__
self.payload = message.payload
self.timestamp = message.age
return self
class ApiHuntClient(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiHuntClient
rdf_deps = [
api_client.ApiClientId,
api_flow.ApiFlowId,
]
class ApiHuntLog(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiHuntLog
rdf_deps = [
api_client.ApiClientId,
api_flow.ApiFlowId,
rdfvalue.RDFDatetime,
]
def InitFromFlowLog(self, fl):
if fl.HasField("client_id"):
self.client_id = fl.client_id.Basename()
if fl.HasField("urn"):
self.flow_id = fl.urn.RelativeName(fl.client_id)
self.timestamp = fl.age
self.log_message = fl.log_message
self.flow_name = fl.flow_name
return self
class ApiHuntError(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiHuntError
rdf_deps = [
api_client.ApiClientId,
rdfvalue.RDFDatetime,
]
def InitFromHuntError(self, he):
if he.HasField("client_id"):
self.client_id = he.client_id.Basename()
if he.HasField("backtrace"):
self.backtrace = he.backtrace
self.log_message = he.log_message
self.timestamp = he.age
return self
class ApiListHuntsArgs(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntsArgs
rdf_deps = [
rdfvalue.Duration,
]
class ApiListHuntsResult(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntsResult
rdf_deps = [
ApiHunt,
]
class ApiListHuntsHandler(api_call_handler_base.ApiCallHandler):
"""Renders list of available hunts."""
args_type = ApiListHuntsArgs
result_type = ApiListHuntsResult
def _BuildHuntList(self, hunt_list):
hunt_list = sorted(
hunt_list,
reverse=True,
key=lambda hunt: hunt.GetRunner().context.create_time)
return [ApiHunt().InitFromAff4Object(hunt_obj) for hunt_obj in hunt_list]
def _CreatedByFilter(self, username, hunt_obj):
return hunt_obj.context.creator == username
def _DescriptionContainsFilter(self, substring, hunt_obj):
return substring in hunt_obj.runner_args.description
def _Username(self, username, token):
if username == "me":
return token.username
else:
return username
def _BuildFilter(self, args, token):
filters = []
if ((args.created_by or args.description_contains) and
not args.active_within):
raise ValueError("created_by/description_contains filters have to be "
"used together with active_within filter (to prevent "
"queries of death)")
if args.created_by:
filters.append(
functools.partial(self._CreatedByFilter,
self._Username(args.created_by, token)))
if args.description_contains:
filters.append(
functools.partial(self._DescriptionContainsFilter,
args.description_contains))
if filters:
def Filter(x):
for f in filters:
if not f(x):
return False
return True
return Filter
else:
return None
def HandleNonFiltered(self, args, token):
fd = aff4.FACTORY.Open("aff4:/hunts", mode="r", token=token)
children = list(fd.ListChildren())
total_count = len(children)
children.sort(key=operator.attrgetter("age"), reverse=True)
if args.count:
children = children[args.offset:args.offset + args.count]
else:
children = children[args.offset:]
hunt_list = []
for hunt in fd.OpenChildren(children=children):
# Legacy hunts may have hunt.context == None: we just want to skip them.
if not isinstance(hunt, implementation.GRRHunt) or not hunt.context:
continue
hunt_list.append(hunt)
return ApiListHuntsResult(
total_count=total_count, items=self._BuildHuntList(hunt_list))
def HandleFiltered(self, filter_func, args, token):
fd = aff4.FACTORY.Open("aff4:/hunts", mode="r", token=token)
children = list(fd.ListChildren())
children.sort(key=operator.attrgetter("age"), reverse=True)
if not args.active_within:
raise ValueError("active_within filter has to be used when "
"any kind of filtering is done (to prevent "
"queries of death)")
min_age = rdfvalue.RDFDatetime.Now() - args.active_within
active_children = []
for child in children:
if child.age > min_age:
active_children.append(child)
else:
break
index = 0
hunt_list = []
active_children_map = {}
for hunt in fd.OpenChildren(children=active_children):
# Legacy hunts may have hunt.context == None: we just want to skip them.
if (not isinstance(hunt, implementation.GRRHunt) or not hunt.context or
not filter_func(hunt)):
continue
active_children_map[hunt.urn] = hunt
for urn in active_children:
try:
hunt = active_children_map[urn]
except KeyError:
continue
if index >= args.offset:
hunt_list.append(hunt)
index += 1
if args.count and len(hunt_list) >= args.count:
break
return ApiListHuntsResult(items=self._BuildHuntList(hunt_list))
def Handle(self, args, token=None):
filter_func = self._BuildFilter(args, token)
if not filter_func and args.active_within:
# If no filters except for "active_within" were specified, just use
# a stub filter function that always returns True. Filtering by
# active_within is done by HandleFiltered code before other filters
# are applied.
filter_func = lambda x: True
if filter_func:
return self.HandleFiltered(filter_func, args, token)
else:
return self.HandleNonFiltered(args, token)
class ApiGetHuntArgs(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiGetHuntArgs
rdf_deps = [
ApiHuntId,
]
class ApiGetHuntHandler(api_call_handler_base.ApiCallHandler):
"""Renders hunt's summary."""
args_type = ApiGetHuntArgs
result_type = ApiHunt
def Handle(self, args, token=None):
try:
hunt = aff4.FACTORY.Open(
args.hunt_id.ToURN(), aff4_type=implementation.GRRHunt, token=token)
return ApiHunt().InitFromAff4Object(hunt, with_full_summary=True)
except aff4.InstantiationError:
raise HuntNotFoundError(
"Hunt with id %s could not be found" % args.hunt_id)
class ApiListHuntResultsArgs(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntResultsArgs
rdf_deps = [
ApiHuntId,
]
class ApiListHuntResultsResult(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntResultsResult
rdf_deps = [
ApiHuntResult,
]
class ApiListHuntResultsHandler(api_call_handler_base.ApiCallHandler):
"""Renders hunt results."""
args_type = ApiListHuntResultsArgs
result_type = ApiListHuntResultsResult
def Handle(self, args, token=None):
results_collection = implementation.GRRHunt.ResultCollectionForHID(
args.hunt_id.ToURN())
items = api_call_handler_utils.FilterCollection(
results_collection, args.offset, args.count, args.filter)
wrapped_items = [ApiHuntResult().InitFromGrrMessage(item) for item in items]
return ApiListHuntResultsResult(
items=wrapped_items, total_count=len(results_collection))
class ApiListHuntCrashesArgs(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntCrashesArgs
rdf_deps = [
ApiHuntId,
]
class ApiListHuntCrashesResult(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntCrashesResult
rdf_deps = [
rdf_client.ClientCrash,
]
class ApiListHuntCrashesHandler(api_call_handler_base.ApiCallHandler):
"""Returns a list of client crashes for the given hunt."""
args_type = ApiListHuntCrashesArgs
result_type = ApiListHuntCrashesResult
def Handle(self, args, token=None):
crashes = implementation.GRRHunt.CrashCollectionForHID(args.hunt_id.ToURN())
total_count = len(crashes)
result = api_call_handler_utils.FilterCollection(crashes, args.offset,
args.count, args.filter)
return ApiListHuntCrashesResult(items=result, total_count=total_count)
class ApiGetHuntResultsExportCommandArgs(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiGetHuntResultsExportCommandArgs
rdf_deps = [
ApiHuntId,
]
class ApiGetHuntResultsExportCommandResult(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiGetHuntResultsExportCommandResult
class ApiGetHuntResultsExportCommandHandler(
api_call_handler_base.ApiCallHandler):
"""Renders GRR export tool command line that exports hunt results."""
args_type = ApiGetHuntResultsExportCommandArgs
result_type = ApiGetHuntResultsExportCommandResult
def Handle(self, args, token=None):
output_fname = re.sub("[^0-9a-zA-Z]+", "_", utils.SmartStr(args.hunt_id))
code_to_execute = ("""grrapi.Hunt("%s").GetFilesArchive()."""
"""WriteToFile("./hunt_results_%s.zip")""") % (
args.hunt_id, output_fname)
export_command_str = " ".join([
config.CONFIG["AdminUI.export_command"], "--exec_code",
utils.ShellQuote(code_to_execute)
])
return ApiGetHuntResultsExportCommandResult(command=export_command_str)
class ApiListHuntOutputPluginsArgs(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntOutputPluginsArgs
rdf_deps = [
ApiHuntId,
]
class ApiListHuntOutputPluginsResult(rdf_structs.RDFProtoStruct):
protobuf = hunt_pb2.ApiListHuntOutputPluginsResult
rdf_deps = [
api_output_plugin.ApiOutputPlugin,
]
class ApiListHuntOutputPluginsHandler(api_call_handler_base.ApiCallHandler):
"""Renders hunt's output plugins states."""
args_type = ApiListHuntOutputPluginsArgs
result_type = ApiListHuntOutputPluginsResult
def Handle(self, args, token=None):
metadata = aff4.FACTORY.Create(
args.hunt_id.ToURN().Add("ResultsMetadata"),
mode="r",
aff4_type=implementation.HuntResultsMetadata,
token=token)
plugins = metadata.Get(metadata.Schema.OUTPUT_PLUGINS, | |
# captum/attr/_utils/attribution.py
#!/usr/bin/env python3
import torch
import torch.nn.functional as F
from .common import (
_run_forward,
_format_input_baseline,
_format_tensor_into_tuples,
_format_additional_forward_args,
_validate_input,
_validate_target,
_tensorize_baseline,
)
from .gradient import compute_gradients
class Attribution:
r"""
All attribution algorithms extend this class. It enforces its child classes
to extend and override core `attribute` method.
"""
def __init__(self, forward_func):
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
self.forward_func = forward_func
def attribute(self, inputs):
r"""
This method computes and returns the attribution values for each input tensor.
Deriving classes are responsible for implementing its logic accordingly.
Specific attribution algorithms that extend this class take relevant
additional arguments.
Args:
inputs (tensor or tuple of tensors): Input for which attribution
is computed. It can be provided as a single tensor or
a tuple of multiple tensors. If multiple input tensors
are provided, the batch sizes must be aligned across all
tensors.
Returns:
*tensor* or tuple of *tensors* of **attributions**:
- **attributions** (*tensor* or tuple of *tensors*):
Attribution values for each
input tensor. The `attributions` have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
raise NotImplementedError("Deriving class should implement attribute method")
def has_convergence_delta(self):
r"""
This method informs the user whether the attribution algorithm provides
a convergence delta (aka an approximation error) or not. Convergence
delta may serve as a proxy of correctness of attribution algorithm's
approximation. If deriving attribution class provides a
`compute_convergence_delta` method, it should
override both `compute_convergence_delta` and `has_convergence_delta` methods.
Returns:
bool:
Returns whether the attribution algorithm
provides a convergence delta (aka approximation error) or not.
"""
return False
def compute_convergence_delta(self, attributions, *args):
r"""
The attribution algorithms which derive `Attribution` class and provide
convergence delta (aka approximation error) should implement this method.
Convergence delta can be computed based on certain properties of the
attribution algorithms.
Args:
attributions (tensor or tuple of tensors): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
*args (optional): Additional arguments that are used by the
sub-classes depending on the specific implementation
of `compute_convergence_delta`.
Returns:
*tensor* of **deltas**:
- **deltas** (*tensor*):
Depending on specific implementation of
sub-classes, convergence delta can be returned per
sample in form of a tensor or it can be aggregated
across multiple samples and returned in form of a
single floating point tensor.
"""
raise NotImplementedError(
"Deriving sub-class should implement" " compute_convergence_delta method"
)
class GradientAttribution(Attribution):
r"""
All gradient based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func):
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
Attribution.__init__(self, forward_func)
self.gradient_func = compute_gradients
def compute_convergence_delta(
self,
attributions,
start_point,
end_point,
target=None,
additional_forward_args=None,
):
r"""
Here we provide a specific implementation for `compute_convergence_delta`
which is based on a common property among gradient-based attribution algorithms.
In the literature sometimes it is also called completeness axiom. Completeness
axiom states that the sum of the attributions must be equal to the difference of the
NN model's function at its end and start points. In other words:
sum(attributions) - (F(end_point) - F(start_point)) is close to zero.
Returned delta of this method is defined as above stated difference.
This implementation assumes that both the `start_point` and `end_point` have
the same shape and dimensionality. It also assumes that the target must have
the same number of examples as the `start_point` and the `end_point` in case
it is provided in form of a list or a non-singleton tensor.
Args:
attributions (tensor or tuple of tensors): Precomputed attribution
scores. The user can compute those using any attribution
algorithm. It is assumed the the shape and the
dimensionality of attributions must match the shape and
the dimensionality of `start_point` and `end_point`.
It also assumes that the attribution tensor's
dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
start_point (tensor or tuple of tensors, optional): `start_point`
is passed as an input to model's forward function. It
is the starting point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
end_point (tensor or tuple of tensors): `end_point`
is passed as an input to model's forward function. It
is the end point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (tuple, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
`additional_forward_args` is used both for `start_point`
and `end_point` when computing the forward pass.
Default: None
Returns:
*tensor* of **deltas**:
- **deltas** (*tensor*):
This implementation returns convergence delta per
sample. Deriving sub-classes may do any type of aggregation
of those values, if necessary.
"""
end_point, start_point = _format_input_baseline(end_point, start_point)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# tensorizing start_point in case it is a scalar or one example baseline
# If the batch size is large we could potentially also tensorize only one
# sample and expand the output to the rest of the elements in the batch
start_point = _tensorize_baseline(end_point, start_point)
attributions = _format_tensor_into_tuples(attributions)
# verify that the attributions and end_point match on 1st dimension
for attribution, end_point_tnsr in zip(attributions, end_point):
assert end_point_tnsr.shape[0] == attribution.shape[0], (
"Attributions tensor and the end_point must match on the first"
" dimension but found attribution: {} and end_point: {}".format(
attribution.shape[0], end_point_tnsr.shape[0]
)
)
num_samples = end_point[0].shape[0]
_validate_input(end_point, start_point)
_validate_target(num_samples, target)
def _sum_rows(input):
return input.view(input.shape[0], -1).sum(1)
with torch.no_grad():
start_point = _sum_rows(
_run_forward(
self.forward_func, start_point, target, additional_forward_args
)
)
end_point = _sum_rows(
_run_forward(
self.forward_func, end_point, target, additional_forward_args
)
)
row_sums = [_sum_rows(attribution) for attribution in attributions]
attr_sum = torch.stack([sum(row_sum) for row_sum in zip(*row_sums)])
return attr_sum - (end_point - start_point)
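# Illustrative note (not part of the original module): the value returned above
# is the per-sample completeness residual
#     delta_i = sum_j attributions_i[j] - (F(end_point_i) - F(start_point_i))
# so a delta close to zero means the attributions account for the change in the
# model output between the baseline and the input. A hypothetical usage sketch,
# assuming `method` is an attribution object exposing this helper and `model`,
# `inputs` and `baselines` are user-supplied tensors:
#
#     attributions = method.attribute(inputs, baselines=baselines, target=0)
#     deltas = method.compute_convergence_delta(
#         attributions, baselines, inputs, target=0)
#     print(deltas.abs().max())  # should be small if the method is complete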
class PerturbationAttribution(Attribution):
r"""
All perturbation based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func):
'proxy': True,
'verbose_name_plural': '47 Foto ATL Perdagangan',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLPerikanan',
fields=[
],
options={
'verbose_name': '45 Foto ATL Perikanan',
'proxy': True,
'verbose_name_plural': '45 Foto ATL Perikanan',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLPerpustakaan',
fields=[
],
options={
'verbose_name': '08 Foto ATL Perpustakaan',
'proxy': True,
'verbose_name_plural': '08 Foto ATL Perpustakaan',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLPertanian',
fields=[
],
options={
'verbose_name': '13 Foto ATL Pertanian',
'proxy': True,
'verbose_name_plural': '13 Foto ATL Pertanian',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLRSUD',
fields=[
],
options={
'verbose_name': '06 Foto ATL RSUD',
'proxy': True,
'verbose_name_plural': '06 Foto ATL RSUD',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLSATPOLPP',
fields=[
],
options={
'verbose_name': '25 Foto ATL SATPOLPP',
'proxy': True,
'verbose_name_plural': '25 Foto ATL SATPOLPP',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLSekretariatKorpri',
fields=[
],
options={
'verbose_name': '27 Foto ATL Sekretariat Korpri',
'proxy': True,
'verbose_name_plural': '27 Foto ATL Sekretariat Korpri',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLSetda',
fields=[
],
options={
'verbose_name': '02 Foto ATL Setda',
'proxy': True,
'verbose_name_plural': '02 Foto ATL Setda',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLSetwan',
fields=[
],
options={
'verbose_name': '01 Foto ATL Setwan',
'proxy': True,
'verbose_name_plural': '01 Foto ATL Setwan',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLSosial',
fields=[
],
options={
'verbose_name': '09 Foto ATL Sosial',
'proxy': True,
'verbose_name_plural': '09 Foto ATL Sosial',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='FotoATLTebingTinggi',
fields=[
],
options={
'verbose_name': '38 Foto ATL Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '38 Foto ATL Tebing Tinggi',
},
bases=('atl.fotoatl',),
),
migrations.CreateModel(
name='HargaATLAwayan',
fields=[
],
options={
'verbose_name': '34 Harga ATL Awayan',
'proxy': True,
'verbose_name_plural': '34 Harga ATL Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBAPPEDA',
fields=[
],
options={
'verbose_name': '21 Harga ATL BAPPEDA',
'proxy': True,
'verbose_name_plural': '21 Harga ATL BAPPEDA',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBatumandi',
fields=[
],
options={
'verbose_name': '32 Harga ATL Batumandi',
'proxy': True,
'verbose_name_plural': '32 Harga ATL Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBatuPiring',
fields=[
],
options={
'verbose_name': '37 Harga ATL Batu Piring',
'proxy': True,
'verbose_name_plural': '37 Harga ATL Batu Piring',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBKD',
fields=[
],
options={
'verbose_name': '19 Harga ATL BKD',
'proxy': True,
'verbose_name_plural': '19 Harga ATL BKD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBKPPD',
fields=[
],
options={
'verbose_name': '26 Harga ATL BKPPD',
'proxy': True,
'verbose_name_plural': '26 Harga ATL BKPPD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBPBD',
fields=[
],
options={
'verbose_name': '39 Harga ATL BPBD',
'proxy': True,
'verbose_name_plural': '39 Harga ATL BPBD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLBPPD',
fields=[
],
options={
'verbose_name': '48 Harga ATL BPPD',
'proxy': True,
'verbose_name_plural': '48 Harga ATL BPPD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkes',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesAwayan',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Awayan',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesBatumandi',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Batumandi',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesHalong',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Halong',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesJuai',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Juai',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Juai',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesKantor',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Kantor',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Kantor',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesLampihong',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Lampihong',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Lampihong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesLokbatu',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Lokbatu',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Lokbatu',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesParingin',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Paringin',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesParinginSelatan',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Paringin Selatan',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Paringin Selatan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesPirsus',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Pirsus',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Pirsus',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesRSUD',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes RSUD',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes RSUD',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesTanahHabang',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Tanah Habang',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Tanah Habang',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesTebingTinggi',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Tebing Tinggi',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Tebing Tinggi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDinkesUren',
fields=[
],
options={
'verbose_name': '05 Harga ATL Dinkes Uren',
'proxy': True,
'verbose_name_plural': '05 Harga ATL Dinkes Uren',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdik',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikAwayan',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Awayan',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikBatumandi',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Batumandi',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikHalong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Halong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikJuai',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Juai',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Juai',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikKantor',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Kantor',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Kantor',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikLampihong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Lampihong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Lampihong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikParingin',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Paringin',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikParinginSelatan',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik Paringin Selatan',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik Paringin Selatan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN1Awayan',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 1 Awayan',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 1 Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN1Batumandi',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 1 Batumandi',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 1 Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN1Halong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 1 Halong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 1 Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN1Juai',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 1 Juai',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 1 Juai',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN1Lampihong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 1 Lampihong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 1 Lampihong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN1Paringin',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 1 Paringin',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 1 Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN2Awayan',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 2 Awayan',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 2 Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN2Batumandi',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 2 Batumandi',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 2 Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN2Halong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 2 Halong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 2 Halong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN2Juai',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 2 Juai',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 2 Juai',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN2Lampihong',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 2 Lampihong',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 2 Lampihong',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN2Paringin',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 2 Paringin',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 2 Paringin',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN3Awayan',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 3 Awayan',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 3 Awayan',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN3Batumandi',
fields=[
],
options={
'verbose_name': '07 Harga ATL Disdik SMPN 3 Batumandi',
'proxy': True,
'verbose_name_plural': '07 Harga ATL Disdik SMPN 3 Batumandi',
},
bases=('atl.hargaatl',),
),
migrations.CreateModel(
name='HargaATLDisdikSMPN3Halong',
fields=[
],
options={
'verbose_name': '07 Harga
Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.upload_run_artifact_with_http_info(owner, project, uuid, uploadfile, **kwargs) # noqa: E501
def upload_run_artifact_with_http_info(self, owner, project, uuid, uploadfile, **kwargs): # noqa: E501
"""Upload an artifact file to a store via run access # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_run_artifact_with_http_info(owner, project, uuid, uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project having access to the store (required)
:param str uuid: Unique identifier of the entity (required)
:param file uploadfile: The file to upload. (required)
:param str path: Destination file path (query parameter).
:param bool overwrite: Whether to overwrite an existing file (query parameter).
:param _return_http_data_only: if True, return the response data without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'project',
'uuid',
'uploadfile',
'path',
'overwrite'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_run_artifact" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `upload_run_artifact`") # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and ('project' not in local_var_params or # noqa: E501
local_var_params['project'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project` when calling `upload_run_artifact`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `upload_run_artifact`") # noqa: E501
# verify the required parameter 'uploadfile' is set
if self.api_client.client_side_validation and ('uploadfile' not in local_var_params or # noqa: E501
local_var_params['uploadfile'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uploadfile` when calling `upload_run_artifact`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'project' in local_var_params:
path_params['project'] = local_var_params['project'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
if 'path' in local_var_params and local_var_params['path'] is not None: # noqa: E501
query_params.append(('path', local_var_params['path'])) # noqa: E501
if 'overwrite' in local_var_params and local_var_params['overwrite'] is not None: # noqa: E501
query_params.append(('overwrite', local_var_params['overwrite'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'uploadfile' in local_var_params:
local_var_files['uploadfile'] = local_var_params['uploadfile'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{project}/runs/{uuid}/artifacts/upload', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def upload_run_logs(self, owner, project, uuid, uploadfile, **kwargs): # noqa: E501
"""Upload a logs file to a store via run access # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_run_logs(owner, project, uuid, uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project having access to the store (required)
:param str uuid: Unique identifier of the entity (required)
:param file uploadfile: The file to upload. (required)
:param str path: Destination file path (query parameter).
:param bool overwrite: Whether to overwrite an existing file (query parameter).
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.upload_run_logs_with_http_info(owner, project, uuid, uploadfile, **kwargs) # noqa: E501
def upload_run_logs_with_http_info(self, owner, project, uuid, uploadfile, **kwargs): # noqa: E501
"""Upload a logs file to a store via run access # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_run_logs_with_http_info(owner, project, uuid, uploadfile, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str owner: Owner of the namespace (required)
:param str project: Project having access to the store (required)
:param str uuid: Unique identifier of the entity (required)
:param file uploadfile: The file to upload. (required)
:param str path: Destination file path (query parameter).
:param bool overwrite: Whether to overwrite an existing file (query parameter).
:param _return_http_data_only: if True, return the response data without
the HTTP status code and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'owner',
'project',
'uuid',
'uploadfile',
'path',
'overwrite'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_run_logs" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'owner' is set
if self.api_client.client_side_validation and ('owner' not in local_var_params or # noqa: E501
local_var_params['owner'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `owner` when calling `upload_run_logs`") # noqa: E501
# verify the required parameter 'project' is set
if self.api_client.client_side_validation and ('project' not in local_var_params or # noqa: E501
local_var_params['project'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `project` when calling `upload_run_logs`") # noqa: E501
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and ('uuid' not in local_var_params or # noqa: E501
local_var_params['uuid'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uuid` when calling `upload_run_logs`") # noqa: E501
# verify the required parameter 'uploadfile' is set
if self.api_client.client_side_validation and ('uploadfile' not in local_var_params or # noqa: E501
local_var_params['uploadfile'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `uploadfile` when calling `upload_run_logs`") # noqa: E501
collection_formats = {}
path_params = {}
if 'owner' in local_var_params:
path_params['owner'] = local_var_params['owner'] # noqa: E501
if 'project' in local_var_params:
path_params['project'] = local_var_params['project'] # noqa: E501
if 'uuid' in local_var_params:
path_params['uuid'] = local_var_params['uuid'] # noqa: E501
query_params = []
if 'path' in local_var_params and local_var_params['path'] is not None: # noqa: E501
query_params.append(('path', local_var_params['path'])) # noqa: E501
if 'overwrite' in local_var_params and local_var_params['overwrite'] is not None: # noqa: E501
query_params.append(('overwrite', local_var_params['overwrite'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
if 'uploadfile' in local_var_params:
local_var_files['uploadfile'] = local_var_params['uploadfile'] # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['ApiKey'] # noqa: E501
return self.api_client.call_api(
'/api/v1/{owner}/{project}/runs/{uuid}/logs/upload', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
# Repository: hi-artem/twistlock-py
# File: test/test_types_settings.py
# coding: utf-8
"""
Prisma Cloud Compute API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 21.04.439
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.types_settings import TypesSettings # noqa: E501
from openapi_client.rest import ApiException
class TestTypesSettings(unittest.TestCase):
"""TypesSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test TypesSettings
include_optional is a boolean; when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.types_settings.TypesSettings() # noqa: E501
if include_optional :
return TypesSettings(
access_ca_cert = '',
address = '',
alerts = openapi_client.models.api/alert_settings.api.AlertSettings(
aggregation_period_ms = 56,
security_advisor_webhook = '', ),
cert_settings = openapi_client.models.types/cert_settings.types.CertSettings(
ca_expiration = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
console_san = [
''
], ),
certificate_period_days = 56,
check_revocation = True,
code_repo_settings = openapi_client.models.shared/code_repo_settings.shared.CodeRepoSettings(
specifications = [
openapi_client.models.shared/code_repo_specification.shared.CodeRepoSpecification(
credential_id = '',
excluded_manifest_paths = [
''
],
explicit_manifest_names = [
''
],
public_only = True,
repositories = [
''
],
target_python_version = '',
type = '[\"github\",\"CI\"]', )
],
webhook_url_suffix = '', ),
communication_port = 56,
console_ca_cert = '',
console_custom_cert = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
console_names = [
''
],
custom_endpoint = '',
custom_endpoint_ca_cert = '',
custom_endpoint_credential_id = '',
custom_endpoint_enabled = True,
custom_labels = openapi_client.models.shared/custom_labels_settings.shared.CustomLabelsSettings(
labels = [
''
], ),
defender_settings = openapi_client.models.defender/settings.defender.Settings(
admission_control_enabled = True,
admission_control_webhook_suffix = '',
automatic_upgrade = True,
disconnect_period_days = 56,
host_custom_compliance_enabled = True,
listening_port = 56, ),
enabled = True,
forensic = openapi_client.models.shared/forensic_settings.shared.ForensicSettings(
collect_network_firewall = True,
collect_network_snapshot = True,
container_disk_usage_mb = 56,
enabled = True,
host_disk_usage_mb = 56,
incident_snapshots_cap = 56, ),
has_admin = True,
host_auto_deploy = [
openapi_client.models.shared/host_auto_deploy_specification.shared.HostAutoDeploySpecification(
aws_region_type = '[\"regular\",\"gov\",\"china\",\"all\"]',
collections = [
openapi_client.models.collection/collection.collection.Collection(
account_ids = [
''
],
app_ids = [
''
],
clusters = [
''
],
code_repos = [
''
],
color = '',
containers = [
''
],
description = '',
functions = [
''
],
hosts = [
''
],
images = [
''
],
labels = [
''
],
modified = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '',
namespaces = [
''
],
owner = '',
prisma = True,
system = True, )
],
console_hostname = '',
credential_id = '',
last_modified = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '', )
],
hpkp = openapi_client.models.types/hpkp_settings.types.HPKPSettings(
certs = '',
enabled = True,
fingerprints = [
''
], ),
identity_settings = openapi_client.models.identity/settings.identity.Settings(
ldap = openapi_client.models.identity/ldap_settings.identity.LdapSettings(
account_password = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
account_upn = '',
ca_cert = '',
enabled = True,
group_search_base = '',
search_base = '',
type = '',
url = '',
user_search_base = '',
user_search_identifier = '', ),
oauth = openapi_client.models.identity/provider_settings.identity.ProviderSettings(
auth_url = '',
cert = '',
client_id = '',
client_secret = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
enabled = True,
group_claim = '',
group_scope = '',
open_id_issues_url = '',
openshift_base_url = '',
provider_alias = '',
provider_name = '[\"github\",\"openshift\"]',
token_url = '', ),
openid = openapi_client.models.identity/provider_settings.identity.ProviderSettings(
auth_url = '',
cert = '',
client_id = '',
enabled = True,
group_claim = '',
group_scope = '',
open_id_issues_url = '',
openshift_base_url = '',
provider_alias = '',
token_url = '', ),
saml = openapi_client.models.identity/saml_settings.identity.SamlSettings(
app_id = '',
app_secret = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
audience = '',
cert = '',
console_url = '',
enabled = True,
issuer = '',
provider_alias = '',
skip_authn_context = True,
tenant_id = '',
type = '[\"okta\",\"gsuite\",\"ping\",\"shibboleth\",\"azure\",\"adfs\"]',
url = '', ), ),
kubernetes_audit = openapi_client.models.shared/kubernetes_audit_settings.shared.KubernetesAuditSettings(
credential_id = '',
deployment_type = '[\"default\",\"gke\"]',
last_polling_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
project_ids = [
''
],
stackdriver_filter = '',
webhook_url_suffix = '', ),
ldap_enabled = True,
license_key = '',
logging = openapi_client.models.shared/logging_settings.shared.LoggingSettings(
console_address = '',
enable_metrics_collection = True,
include_runtime_link = True,
stdout = openapi_client.models.shared/logger_setting.shared.LoggerSetting(
all_proc_events = True,
enabled = True,
verbose_scan = True, ),
syslog = openapi_client.models.shared/syslog_settings.shared.SyslogSettings(
addr = '',
all_proc_events = True,
enabled = True,
id = '',
verbose_scan = True, ), ),
logon = openapi_client.models.types/logon_settings.types.LogonSettings(
basic_auth_disabled = True,
include_tls = True,
session_timeout_sec = 56,
strong_password = True,
use_support_credentials = True, ),
oauth_enabled = True,
oidc_enabled = True,
projects = openapi_client.models.api/project_settings.api.ProjectSettings(
master = True,
redirect_url = '', ),
proxy = openapi_client.models.common/proxy_settings.common.ProxySettings(
ca = '',
http_proxy = '',
no_proxy = '',
password = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
user = '', ),
registry = openapi_client.models.shared/registry_settings.shared.RegistrySettings(
harbor_scanner_url_suffix = '',
specifications = [
openapi_client.models.shared/registry_specification.shared.RegistrySpecification(
cap = 56,
collections = [
''
],
credential = openapi_client.models.cred/credential.cred.Credential(
_id = '',
account_guid = '',
account_id = '',
api_token = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
ca_cert = '',
description = '',
external = True,
last_modified = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
owner = '',
role_arn = '',
secret = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
tokens = openapi_client.models.cred/temporary_token.cred.TemporaryToken(
aws_access_key_id = '',
aws_secret_access_key = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
duration = 56,
expiration_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
token = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ), ),
type = '[\"aws\",\"azure\",\"gcp\",\"ibmCloud\",\"apiToken\",\"githubToken\",\"basic\",\"dtr\",\"kubeconfig\",\"certificate\"]',
use_aws_role = True, ),
credential_id = '',
excluded_repositories = [
''
],
excluded_tags = [
''
],
jfrog_repo_types = [
'[\"local\",\"remote\",\"virtual\"]'
],
namespace = '',
os = '[\"linux\",\"windows\"]',
registry = '',
repository = '',
scanners = 56,
tag = '',
version = '',
version_pattern = '', )
],
webhook_url_suffix = '', ),
saml_enabled = True,
scan = openapi_client.models.shared/scan_settings.shared.ScanSettings(
cloud_platforms_scan_period_ms = 56,
code_repos_scan_period_ms = 56,
containers_scan_period_ms = 56,
extract_archive = True,
images_scan_period_ms = 56,
include_js_dependencies = True,
registry_scan_period_ms = 56,
registry_scan_retention_days = 56,
scan_running_images = True,
serverless_scan_period_ms = 56,
show_infra_containers = True,
show_negligible_vulnerabilities = True,
system_scan_period_ms = 56,
tas_droplets_scan_period_ms = 56,
vm_scan_period_ms = 56, ),
secrets_stores = openapi_client.models.shared/secrets_stores.shared.SecretsStores(
refresh_period_hours = 56,
secrets_stores = [
openapi_client.models.shared/secrets_store.shared.SecretsStore(
app_id = '',
ca_cert = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
client_cert = openapi_client.models.common/secret.common.Secret(
encrypted = '',
plain = '', ),
credential_id = '',
name = '',
region = '',
type = '[\"hashicorp\",\"hashicorp010\",\"cyberark\",\"awsParameterStore\",\"awsSecretsManager\",\"azure\"]',
url = '', )
], ),
secured_console_port = 56,
serverless_auto_deploy = [
openapi_client.models.shared/serverless_auto_deploy_specification.shared.ServerlessAutoDeploySpecification(
collections = [
openapi_client.models.collection/collection.collection.Collection(
account_ids = [
''
],
app_ids = [
''
],
clusters = [
''
],
code_repos = [
''
],
color = '',
containers = [
''
],
description = '',
functions = [
''
],
hosts = [
''
],
images = [
''
],
labels = [
''
],
modified = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
name = '',
namespaces = [
''
],
owner = '',
prisma = True,
system = True, )
],
console_addr = '',
credential_id = '',
elif event.type == MOUSEBUTTONDOWN:
# store current mouse position for mouse-steering
if event.button == 1:
self._mouse_steering_center = event.pos
elif event.type == MOUSEBUTTONUP:
if event.button == 1:
self._mouse_steering_center = None
if not self._autopilot_enabled:
prev_steer_cache = self._steer_cache
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
if pygame.mouse.get_pressed()[0]:
self._parse_mouse(pygame.mouse.get_pos())
self._control.reverse = self._control.gear < 0
vehicle_control = self._control
world.hud.original_vehicle_control = vehicle_control
world.hud.restricted_vehicle_control = vehicle_control
# limit speed to 30kmh
v = self._world.player.get_velocity()
if (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)) > 30.0:
self._control.throttle = 0
# if self._world.rss_sensor and self._world.rss_sensor.ego_dynamics_on_route and not self._world.rss_sensor.ego_dynamics_on_route.ego_center_within_route:
# print ("Not on route!" + str(self._world.rss_sensor.ego_dynamics_on_route))
if self._restrictor:
rss_proper_response = self._world.rss_sensor.proper_response if self._world.rss_sensor and self._world.rss_sensor.response_valid else None
if rss_proper_response:
if not (pygame.key.get_mods() & KMOD_CTRL):
vehicle_control = self._restrictor.restrict_vehicle_control(
vehicle_control, rss_proper_response, self._world.rss_sensor.ego_dynamics_on_route, self._vehicle_physics)
world.hud.restricted_vehicle_control = vehicle_control
world.hud.allowed_steering_ranges = self._world.rss_sensor.get_steering_ranges()
if world.hud.original_vehicle_control.steer != world.hud.restricted_vehicle_control.steer:
self._steer_cache = prev_steer_cache
# Set automatic control-related vehicle lights
if vehicle_control.brake:
current_lights |= carla.VehicleLightState.Brake
else: # Remove the Brake flag
current_lights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Brake
if vehicle_control.reverse:
current_lights |= carla.VehicleLightState.Reverse
else: # Remove the Reverse flag
current_lights &= carla.VehicleLightState.All ^ carla.VehicleLightState.Reverse
if current_lights != self._lights: # Change the light state only if necessary
self._lights = current_lights
world.player.set_light_state(carla.VehicleLightState(self._lights))
world.player.apply_control(vehicle_control)
def _parse_vehicle_keys(self, keys, milliseconds):
if keys[K_UP] or keys[K_w]:
self._control.throttle = min(self._control.throttle + 0.2, 1)
else:
self._control.throttle = max(self._control.throttle - 0.2, 0)
if keys[K_DOWN] or keys[K_s]:
self._control.brake = min(self._control.brake + 0.2, 1)
else:
self._control.brake = max(self._control.brake - 0.2, 0)
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
if self._steer_cache > 0:
self._steer_cache = 0
else:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
if self._steer_cache < 0:
self._steer_cache = 0
else:
self._steer_cache += steer_increment
elif self._steer_cache > 0:
self._steer_cache = max(self._steer_cache - steer_increment, 0.0)
elif self._steer_cache < 0:
self._steer_cache = min(self._steer_cache + steer_increment, 0.0)
else:
self._steer_cache = 0
self._steer_cache = min(1.0, max(-1.0, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
self._control.hand_brake = keys[K_SPACE]
def _parse_mouse(self, pos):
if not self._mouse_steering_center:
return
lateral = float(pos[0] - self._mouse_steering_center[0])
longitudinal = float(pos[1] - self._mouse_steering_center[1])
max_val = self.MOUSE_STEERING_RANGE
lateral = -max_val if lateral < -max_val else max_val if lateral > max_val else lateral
longitudinal = -max_val if longitudinal < -max_val else max_val if longitudinal > max_val else longitudinal
self._control.steer = lateral / max_val
if longitudinal < 0.0:
self._control.throttle = -longitudinal / max_val
self._control.brake = 0.0
elif longitudinal > 0.0:
self._control.throttle = 0.0
self._control.brake = longitudinal / max_val
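# Worked example (illustrative, assuming MOUSE_STEERING_RANGE is 200 px):
# with the steering center at the initial click position, dragging the cursor
# 100 px to the right and 50 px up gives lateral = 100, longitudinal = -50, so
#     steer    = 100 / 200 = 0.5    (half right lock)
#     throttle =  50 / 200 = 0.25   (negative longitudinal accelerates)
#     brake    = 0.0
# Dragging below the click position instead yields throttle 0 and brake > 0.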
@staticmethod
def _is_quit_shortcut(key):
return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD(object):
def __init__(self, width, height, world):
self.dim = (width, height)
self._world = world
self.map_name = world.get_map().name
font = pygame.font.Font(pygame.font.get_default_font(), 20)
font_name = 'courier' if os.name == 'nt' else 'mono'
fonts = [x for x in pygame.font.get_fonts() if font_name in x]
default_font = 'ubuntumono'
mono = default_font if default_font in fonts else fonts[0]
mono = pygame.font.match_font(mono)
self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
self._notifications = FadingText(font, (width, 40), (0, height - 40))
self.help = HelpText(pygame.font.Font(mono, 16), width, height)
self.server_fps = 0
self.frame = 0
self.simulation_time = 0
self.original_vehicle_control = None
self.restricted_vehicle_control = None
self.allowed_steering_ranges = []
self._show_info = True
self._info_text = []
self._server_clock = pygame.time.Clock()
self.rss_state_visualizer = RssStateVisualizer(self.dim, self._font_mono, self._world)
def on_world_tick(self, timestamp):
self._server_clock.tick()
self.server_fps = self._server_clock.get_fps()
self.frame = timestamp.frame
self.simulation_time = timestamp.elapsed_seconds
def tick(self, player, clock):
self._notifications.tick(clock)
if not self._show_info:
return
t = player.get_transform()
v = player.get_velocity()
c = player.get_control()
self._info_text = [
'Server: % 16.0f FPS' % self.server_fps,
'Client: % 16.0f FPS' % clock.get_fps(),
'Map: % 20s' % self.map_name,
'',
'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
'Heading: % 20.2f' % math.radians(t.rotation.yaw),
'']
if self.original_vehicle_control:
orig_control = self.original_vehicle_control
restricted_control = self.restricted_vehicle_control
allowed_steering_ranges = self.allowed_steering_ranges
self._info_text += [
('Throttle:', orig_control.throttle, 0.0, 1.0, restricted_control.throttle),
('Steer:', orig_control.steer, -1.0, 1.0, restricted_control.steer, allowed_steering_ranges),
('Brake:', orig_control.brake, 0.0, 1.0, restricted_control.brake)]
self._info_text += [
('Reverse:', c.reverse),
'']
def toggle_info(self):
self._show_info = not self._show_info
def notification(self, text, seconds=2.0):
self._notifications.set_text(text, seconds=seconds)
def error(self, text):
self._notifications.set_text('Error: %s' % text, (255, 0, 0))
def render(self, display):
if self._show_info:
info_surface = pygame.Surface((220, self.dim[1]))
info_surface.set_alpha(100)
display.blit(info_surface, (0, 0))
v_offset = 4
bar_h_offset = 100
bar_width = 106
for item in self._info_text:
text_color = (255, 255, 255)
if v_offset + 18 > self.dim[1]:
break
if isinstance(item, list):
if len(item) > 1:
points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
pygame.draw.lines(display, (255, 136, 0), False, points, 2)
item = None
v_offset += 18
elif isinstance(item, tuple):
if isinstance(item[1], bool):
rect = pygame.Rect((bar_h_offset, v_offset + 2), (10, 10))
pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
else:
# draw allowed steering ranges
if len(item) == 6 and item[2] < 0.0:
for steering_range in item[5]:
starting_value = min(steering_range[0], steering_range[1])
length = (max(steering_range[0], steering_range[1]) -
min(steering_range[0], steering_range[1])) / 2
rect = pygame.Rect(
(bar_h_offset + (starting_value + 1) * (bar_width / 2), v_offset + 2), (length * bar_width, 14))
pygame.draw.rect(display, (0, 255, 0), rect)
# draw border
rect_border = pygame.Rect((bar_h_offset, v_offset + 2), (bar_width, 14))
pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
# draw value / restricted value
input_value_rect_fill = 0
if len(item) >= 5:
if item[1] != item[4]:
input_value_rect_fill = 1
f = (item[4] - item[2]) / (item[3] - item[2])
if item[2] < 0.0:
rect = pygame.Rect(
(bar_h_offset + 1 + f * (bar_width - 6), v_offset + 3), (12, 12))
else:
rect = pygame.Rect((bar_h_offset + 1, v_offset + 3), (f * bar_width, 12))
pygame.draw.rect(display, (255, 0, 0), rect)
f = (item[1] - item[2]) / (item[3] - item[2])
rect = None
if item[2] < 0.0:
rect = pygame.Rect((bar_h_offset + 2 + f * (bar_width - 14), v_offset + 4), (10, 10))
else:
if item[1] != 0:
rect = pygame.Rect((bar_h_offset + 2, v_offset + 4), (f * (bar_width - 4), 10))
if rect:
pygame.draw.rect(display, (255, 255, 255), rect, input_value_rect_fill)
item = item[0]
if item: # At this point has to be a str.
surface = self._font_mono.render(item, True, text_color)
display.blit(surface, (8, v_offset))
v_offset += 18
self.rss_state_visualizer.render(display, v_offset)
self._notifications.render(display)
self.help.render(display)
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
def __init__(self, font, dim, pos):
self.font = font
self.dim = dim
self.pos = pos
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
def set_text(self, text, color=(255, 255, 255), seconds=2.0):
text_texture = self.font.render(text, True, color)
self.surface = pygame.Surface(self.dim)
self.seconds_left = seconds
self.surface.fill((0, 0, 0, 0))
self.surface.blit(text_texture, (10, 11))
def tick(self, clock):
delta_seconds = 1e-3 * clock.get_time()
self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
self.surface.set_alpha(500.0 * self.seconds_left)
def render(self, display):
display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
"""Helper class to handle text output using pygame"""
def __init__(self, font, width, height):
lines = __doc__.split('\n')
self.font = font
self.line_space = 18
self.dim = (780, len(lines) * self.line_space + 12)
self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
self.seconds_left = 0
self.surface = pygame.Surface(self.dim)
self.surface.fill((0, 0, 0, 0))
for n, line in enumerate(lines):
text_texture = self.font.render(line, True, (255, 255, 255))
self.surface.blit(text_texture, (22, n * self.line_space))
self._render = False
self.surface.set_alpha(220)
def toggle(self):
self._render = not self._render
def render(self, display):
if self._render:
display.blit(self.surface, self.pos)
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args):
pygame.init()
pygame.font.init()
world = None
try:
client = carla.Client(args.host, args.port)
client.set_timeout(2.0)
display = pygame.display.set_mode(
(args.width, args.height),
pygame.HWSURFACE | pygame.DOUBLEBUF)
world = World(client.get_world(), args)
controller = VehicleControl(world, args.autopilot)
clock = pygame.time.Clock()
while True:
clock.tick_busy_loop(60)
if controller.parse_events(world, clock):
return
world.tick(clock)
world.render(display)
controller.render(display)
pygame.display.flip()
finally:
if world is not None:
print('Destroying the world...')
world.destroy()
print('Destroyed!')
pygame.quit()
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def main():
argparser = argparse.ArgumentParser(
description='CARLA Manual Control Client RSS')
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='127.0.0.1',
help='IP of the host server (default: 127.0.0.1)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'-a', '--autopilot',
action='store_true',
help='enable autopilot')
argparser.add_argument(
'--res',
metavar='WIDTHxHEIGHT',
default='1280x720',
help='window resolution (default: 1280x720)')
argparser.add_argument(
'--filter',
metavar='PATTERN',
default='vehicle.*',
help='actor filter (default: "vehicle.*")')
argparser.add_argument(
'--rolename',
metavar='NAME',
default='hero',
help='actor role name (default: "hero")')
argparser.add_argument(
'--externalActor',
action='store_true',
help='attaches to
import copy
import json
import logging
import unittest
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
from unittest import mock
import pika
import pika.exceptions
from freezegun import freeze_time
from parameterized import parameterized
from src.data_store.redis import RedisApi
from src.data_store.redis.store_keys import Keys
from src.data_store.stores.config import ConfigStore
from src.message_broker.rabbitmq import RabbitMQApi
from src.utils import env
from src.utils.constants.rabbitmq import (CONFIG_EXCHANGE,
HEALTH_CHECK_EXCHANGE,
CONFIGS_STORE_INPUT_QUEUE_NAME,
HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY,
CONFIGS_STORE_INPUT_ROUTING_KEY,
TOPIC)
from src.utils.exceptions import (PANICException)
from test.utils.utils import (connect_to_rabbit,
disconnect_from_rabbit,
delete_exchange_if_exists,
delete_queue_if_exists)
class TestConfigStore(unittest.TestCase):
def setUp(self) -> None:
self.dummy_logger = logging.getLogger('Dummy')
self.dummy_logger.disabled = True
self.connection_check_time_interval = timedelta(seconds=0)
self.rabbit_ip = env.RABBIT_IP
self.rabbitmq = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.test_rabbit_manager = RabbitMQApi(
self.dummy_logger, self.rabbit_ip,
connection_check_time_interval=self.connection_check_time_interval)
self.redis_db = env.REDIS_DB
self.redis_host = env.REDIS_IP
self.redis_port = env.REDIS_PORT
self.redis_namespace = env.UNIQUE_ALERTER_IDENTIFIER
self.redis = RedisApi(self.dummy_logger, self.redis_db,
self.redis_host, self.redis_port, '',
self.redis_namespace,
self.connection_check_time_interval)
self.mongo_ip = env.DB_IP
self.mongo_db = env.DB_NAME
self.mongo_port = env.DB_PORT
self.test_store_name = 'store name'
self.test_store = ConfigStore(self.test_store_name,
self.dummy_logger,
self.rabbitmq)
self.heartbeat_routing_key = HEARTBEAT_OUTPUT_WORKER_ROUTING_KEY
self.test_queue_name = 'test queue'
connect_to_rabbit(self.rabbitmq)
self.rabbitmq.exchange_declare(HEALTH_CHECK_EXCHANGE, TOPIC, False,
True, False, False)
self.rabbitmq.exchange_declare(CONFIG_EXCHANGE, TOPIC, False, True,
False, False)
self.rabbitmq.queue_declare(CONFIGS_STORE_INPUT_QUEUE_NAME, False, True,
False, False)
self.rabbitmq.queue_bind(CONFIGS_STORE_INPUT_QUEUE_NAME,
CONFIG_EXCHANGE,
CONFIGS_STORE_INPUT_ROUTING_KEY)
connect_to_rabbit(self.test_rabbit_manager)
self.test_rabbit_manager.queue_declare(self.test_queue_name, False,
True, False, False)
self.test_rabbit_manager.queue_bind(self.test_queue_name,
HEALTH_CHECK_EXCHANGE,
self.heartbeat_routing_key)
self.test_parent_id = 'parent_id'
self.test_config_type = 'config_type'
self.test_data_str = 'test data'
self.test_exception = PANICException('test_exception', 1)
self.last_monitored = datetime(2012, 1, 1).timestamp()
self.routing_key_1 = 'chains.cosmos.cosmos.nodes_config'
self.routing_key_2 = 'chains.cosmos.cosmos.alerts_config'
self.routing_key_3 = 'chains.cosmos.cosmos.github_repos_config'
self.routing_key_4 = 'general.github_repos_config'
self.routing_key_5 = 'general.alerts_config'
self.routing_key_6 = 'general.systems_config'
self.routing_key_7 = 'channels.email_config'
self.routing_key_8 = 'channels.pagerduty_config'
self.routing_key_9 = 'channels.opsgenie_config'
self.routing_key_10 = 'channels.telegram_config'
self.routing_key_11 = 'channels.twilio_config'
self.routing_key_12 = 'chains.chainlink.bsc.nodes_config'
self.routing_key_13 = 'chains.chainlink.bsc.alerts_config'
self.routing_key_14 = 'chains.chainlink.bsc.github_repos_config'
self.routing_key_15 = 'chains.chainlink.bsc.systems_config'
self.routing_key_16 = 'chains.chainlink.bsc.evm_nodes_config'
self.nodes_config_1 = {
"node_3e0a5189-f474-4120-a0a4-d5ab817c0504": {
"id": "node_3e0a5189-f474-4120-a0a4-d5ab817c0504",
"parent_id": "chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548",
"name": "cosmos_sentry_1",
"monitor_tendermint": "false",
"monitor_rpc": "false",
"monitor_prometheus": "false",
"exporter_url": "test_url",
"monitor_system": "true",
"is_validator": "false",
"monitor_node": "true",
"is_archive_node": "true",
"use_as_data_source": "true"
},
"node_f8ebf267-9b53-4aa1-9c45-e84a9cba5fbc": {
"id": "node_f8ebf267-9b53-4aa1-9c45-e84a9cba5fbc",
"parent_id": "chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548",
"name": "cosmos_sentry_2",
"monitor_tendermint": "false",
"monitor_rpc": "false",
"monitor_prometheus": "false",
"exporter_url": "test_url",
"monitor_system": "true",
"is_validator": "false",
"monitor_node": "true",
"is_archive_node": "true",
"use_as_data_source": "true"
}
}
self.nodes_config_2 = {
"node_sgdfh4y5u56u56j": {
"id": "node_sgdfh4y5u56u56j",
"parent_id": "chain_name_okjhfghuhsiduiusdh",
"name": "bsc_sentry_1",
"node_prometheus_urls": "test_url1,test_url2",
"monitor_prometheus": "true",
"monitor_node": "true",
"monitor_contracts": "true",
"ethereum_addresses":
"0xC7040bEeC1A3794C3e7CC9bA5C68070DAD0b4c29"
},
"node_dfouihgdfuoghdfuudfh": {
"id": "node_dfouihgdfuoghdfuudfh",
"parent_id": "chain_name_okjhfghuhsiduiusdh",
"name": "bsc_sentry_2",
"node_prometheus_urls": "test_url1,test_url2",
"monitor_prometheus": "true",
"monitor_contracts": "true",
"monitor_node": "false",
"ethereum_addresses":
"0xC7040bEeC1A3794C3e7CC9bA5C68070MUM0b4c29"
},
"node_mkdfkghhnusd": {
"id": "node_mkdfkghhnusd",
"parent_id": "chain_name_okjhfghuhsiduiusdh",
"name": "bsc_sentry_3",
"node_prometheus_urls": "test_url1,test_url2",
"monitor_prometheus": "false",
"monitor_contracts": "false",
"monitor_node": "true",
"ethereum_addresses":
"0xC7040bEeC1A3794C3e7CC9bA5C68070SIS0b4c29"
},
"node_difuhbguidf": {
"id": "node_difuhbguidf",
"parent_id": "chain_name_okjhfghuhsiduiusdh",
"name": "bsc_sentry_4",
"node_prometheus_urls": "test_url1,test_url2",
"monitor_prometheus": "false",
"monitor_contracts": "false",
"monitor_node": "false",
"ethereum_addresses":
"0xC7040bEeC1A3794C3e7CC9bA5C68070BRO0b4c29"
},
}
self.evm_nodes_config = {
"node_4e0a5189-f474-4120-a0a4-d5ab817c0504": {
"id": "node_4e9eeacf-c98f-4207-81ec-7d5cb7a1ff7a",
"parent_id": "chain_name_2be935b4-1072-469c-a5ff-1495f032fefa",
"name": "evm_node_1",
"node_http_url": "test_url",
"monitor_node": "true"
},
"node_48ebf267-9b53-4aa1-9c45-e84a9cba5fbc": {
"id": "node_48ebf267-9b53-4aa1-9c45-e84a9cba5fbc",
"parent_id": "chain_name_2be935b4-1072-469c-a5ff-1495f032fefa",
"name": "evm_node_2",
"node_http_url": "test_url",
"monitor_node": "false"
}
}
self.github_repos_config_1 = {
"repo_4ea76d87-d291-4b68-88af-da2bd1e16e2e": {
"id": "repo_4ea76d87-d291-4b68-88af-da2bd1e16e2e",
"parent_id": "chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548",
"repo_name": "tendermint/tendermint/",
"monitor_repo": "true"
},
"repo_83713022-4155-420b-ada1-73a863f58282": {
"id": "repo_83713022-4155-420b-ada1-73a863f58282",
"parent_id": "chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548",
"repo_name": "SimplyVC/panic_cosmos/",
"monitor_repo": "false"
}
}
self.github_repos_config_2 = {
"repo_sd978fgt789sdfg78g334th87fg": {
"id": "repo_sd978fgt789sdfg78g334th87fg",
"parent_id": "GENERAL",
"repo_name": "SimplyVC/panic_polkadot/",
"monitor_repo": "true"
},
"repo_S789G7S9DGS97G": {
"id": "repo_S789G7S9DGS97G",
"parent_id": "GENERAL",
"repo_name": "SimplyVC/panic_cosmos/",
"monitor_repo": "false"
}
}
self.alerts_config_1 = {
"1": {
"name": "open_file_descriptors",
"enabled": "true",
"parent_id": "GENERAL",
"critical_threshold": "95",
"critical_repeat": "300",
"critical_enabled": "true",
"warning_threshold": "85",
"warning_enabled": "true"
},
"2": {
"name": "system_cpu_usage",
"enabled": "true",
"parent_id": "GENERAL",
"critical_threshold": "95",
"critical_repeat": "300",
"critical_enabled": "true",
"warning_threshold": "85",
"warning_enabled": "true"
},
"3": {
"name": "system_storage_usage",
"enabled": "true",
"parent_id": "GENERAL",
"critical_threshold": "95",
"critical_repeat": "300",
"critical_enabled": "true",
"warning_threshold": "85",
"warning_enabled": "true"
},
"4": {
"name": "system_ram_usage",
"enabled": "true",
"parent_id": "GENERAL",
"critical_threshold": "95",
"critical_repeat": "300",
"critical_enabled": "true",
"warning_threshold": "85",
"warning_enabled": "true"
},
"5": {
"name": "system_is_down",
"enabled": "true",
"parent_id": "GENERAL",
"critical_threshold": "200",
"critical_repeat": "300",
"critical_enabled": "true",
"warning_threshold": "0",
"warning_enabled": "true"
}
}
self.systems_config_1 = {
"system_1d026af1-6cab-403d-8256-c8faa462930a": {
"id": "system_1d026af1-6cab-403d-8256-c8faa462930a",
"parent_id": "GENERAL",
"name": "panic_system_1",
"exporter_url": "test_url",
"monitor_system": "true"
},
"system_a51b3a33-cb3f-4f53-a657-8a5a0efe0822": {
"id": "system_a51b3a33-cb3f-4f53-a657-8a5a0efe0822",
"parent_id": "GENERAL",
"name": "panic_system_2",
"exporter_url": "test_url",
"monitor_system": "false"
}
}
self.systems_config_2 = {
"system_098hfd90ghbsd98fgbs98rgf9": {
"id": "system_098hfd90ghbsd98fgbs98rgf9",
"parent_id": "chain_name_okjhfghuhsiduiusdh",
"name": "matic_full_node_nl",
"exporter_url": "test_url",
"monitor_system": "true"
},
"system_9sd8gh927gtb94gb99e": {
"id": "system_9sd8gh927gtb94gb99e",
"parent_id": "chain_name_okjhfghuhsiduiusdh",
"name": "matic_full_node_mt",
"exporter_url": "test_url",
"monitor_system": "false"
}
}
self.telegram_config_1 = {
"telegram_8431a28e-a2ce-4e9b-839c-299b62e3d5b9": {
"id": "telegram_8431a28e-a2ce-4e9b-839c-299b62e3d5b9",
"channel_name": "telegram_chat_1",
"bot_token": "test_bot_token",
"chat_id": "test_chat_id",
"info": "true",
"warning": "true",
"critical": "true",
"error": "true",
"alerts": "false",
"commands": "false",
"parent_ids":
"chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548,"
"chain_name_94aafe04-8287-463a-8416-0401852b3ca2,GENERAL",
"parent_names": "cosmos,kusama,GENERAL"
}
}
self.twilio_config_1 = {
"twilio_a7016a6b-9394-4584-abe3-5a5c434b6b7c": {
"id": "twilio_a7016a6b-9394-4584-abe3-5a5c434b6b7c",
"channel_name": "twilio_caller_main",
"account_sid": "test_account_sid",
"auth_token": "test_auth_token",
"twilio_phone_no": "test_phone_number",
"twilio_phone_numbers_to_dial_valid":
"test_phone_numbers_to_dial_valid",
"parent_ids":
"chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548,"
"chain_name_94aafe04-8287-463a-8416-0401852b3ca2,GENERAL",
"parent_names": "cosmos,kusama,GENERAL"
}
}
self.pagerduty_config_1 = {
"pagerduty_4092d0ed-ac45-462b-b62a-89cffd4833cc": {
"id": "pagerduty_4092d0ed-ac45-462b-b62a-89cffd4833cc",
"channel_name": "pager_duty_1",
"api_token": "test_api_token",
"integration_key": "test_integration_key",
"info": "true",
"warning": "true",
"critical": "true",
"error": "true",
"parent_ids":
"chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548,"
"chain_name_94aafe04-8287-463a-8416-0401852b3ca2,GENERAL",
"parent_names": "cosmos,kusama,GENERAL"
}
}
self.opsgenie_config_1 = {
"opsgenie_9550bee1-5880-41f6-bdcf-a289472d7c35": {
"id": "opsgenie_9550bee1-5880-41f6-bdcf-a289472d7c35",
"channel_name": "ops_genie_main",
"api_token": "test_api_token",
"eu": "true",
"info": "true",
"warning": "true",
"critical": "true",
"error": "true",
"parent_ids":
"chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548,"
"chain_name_94aafe04-8287-463a-8416-0401852b3ca2,GENERAL",
"parent_names": "cosmos,kusama,GENERAL"
}
}
self.email_config_1 = {
"email_01b23d79-10f5-4815-a11f-034f53974b23": {
"id": "email_01b23d79-10f5-4815-a11f-034f53974b23",
"channel_name": "main_email_channel",
"port": "test_port",
"smtp": "test_smtp",
"email_from": "test_email_from",
"emails_to": "test_email_to",
"info": "true",
"warning": "true",
"critical": "true",
"error": "true",
"parent_ids":
"chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548,"
"chain_name_94aafe04-8287-463a-8416-0401852b3ca2,GENERAL",
"parent_names": "cosmos,kusama,GENERAL"
}
}
self.expected_data_nodes_1 = {
'cosmos': {
'monitored': {
'systems': [
{
'node_3e0a5189-f474-4120-a0a4-d5ab817c0504':
'cosmos_sentry_1'
},
{
'node_f8ebf267-9b53-4aa1-9c45-e84a9cba5fbc':
'cosmos_sentry_2'
}
]
},
'not_monitored': {
'systems': []
},
'parent_id': 'chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548'
}
}
self.expected_data_nodes_2 = {
'bsc': {
'monitored': {
'nodes': [
{
"node_sgdfh4y5u56u56j": "bsc_sentry_1"
},
]
},
'not_monitored': {
'nodes': [
{
"node_dfouihgdfuoghdfuudfh": "bsc_sentry_2",
},
{
"node_mkdfkghhnusd": "bsc_sentry_3",
},
{
"node_difuhbguidf": "bsc_sentry_4",
},
]
},
"parent_id": "chain_name_okjhfghuhsiduiusdh"
}
}
self.expected_data_evm_nodes = {
'bsc': {
'monitored': {
'evm_nodes': [
{
"node_4e9eeacf-c98f-4207-81ec-7d5cb7a1ff7a":
"evm_node_1"
},
]
},
'not_monitored': {
'evm_nodes': [
{
"node_48ebf267-9b53-4aa1-9c45-e84a9cba5fbc":
"evm_node_2",
},
]
},
"parent_id": "chain_name_2be935b4-1072-469c-a5ff-1495f032fefa"
}
}
self.expected_data_repos_1 = {
'cosmos': {
'monitored': {
'github_repos': [
{
'repo_4ea76d87-d291-4b68-88af-da2bd1e16e2e':
'tendermint/tendermint/'
},
]
},
'not_monitored': {
'github_repos': [
{
'repo_83713022-4155-420b-ada1-73a863f58282':
'SimplyVC/panic_cosmos/'
}
]
},
'parent_id': 'chain_name_7f4bc842-21b1-4bcb-8ab9-d86e08149548'
}
}
self.expected_data_repos_2 = {
'general': {
'monitored': {
'github_repos': [
{
"repo_sd978fgt789sdfg78g334th87fg":
'SimplyVC/panic_polkadot/'
},
]
},
'not_monitored': {
'github_repos': [
{
'repo_S789G7S9DGS97G': 'SimplyVC/panic_cosmos/'
}
]
},
"parent_id": "GENERAL"
}
}
self.expected_data_systems_1 = {
'general': {
'monitored': {
'systems': [
{
'system_1d026af1-6cab-403d-8256-c8faa462930a':
'panic_system_1'
},
]
},
'not_monitored': {
'systems': [
{
'system_a51b3a33-cb3f-4f53-a657-8a5a0efe0822':
'panic_system_2'
},
]
},
"parent_id": "GENERAL"
}
}
self.expected_data_systems_2 = {
'bsc': {
'monitored': {
'systems': [
{
'system_098hfd90ghbsd98fgbs98rgf9':
'matic_full_node_nl'
},
]
},
'not_monitored': {
'systems': [
{
'system_9sd8gh927gtb94gb99e':
'matic_full_node_mt'
},
]
},
"parent_id": "chain_name_okjhfghuhsiduiusdh"
}
}
self.config_data_unexpected = {
"unexpected": {}
}
def tearDown(self) -> None:
connect_to_rabbit(self.rabbitmq)
delete_queue_if_exists(self.rabbitmq, CONFIGS_STORE_INPUT_QUEUE_NAME)
delete_exchange_if_exists(self.rabbitmq, CONFIG_EXCHANGE)
delete_exchange_if_exists(self.rabbitmq, HEALTH_CHECK_EXCHANGE)
disconnect_from_rabbit(self.rabbitmq)
connect_to_rabbit(self.test_rabbit_manager)
delete_queue_if_exists(self.test_rabbit_manager, self.test_queue_name)
disconnect_from_rabbit(self.test_rabbit_manager)
self.redis.delete_all_unsafe()
self.redis = None
self.dummy_logger = None
self.connection_check_time_interval = None
self.rabbitmq = None
self.test_store._redis = None
self.test_rabbit_manager = None
self.test_store = None
def test__str__returns_name_correctly(self) -> None:
self.assertEqual(self.test_store_name, str(self.test_store))
def test_name_property_returns_name_correctly(self) -> None:
self.assertEqual(self.test_store_name, self.test_store.name)
def test_mongo_ip_property_returns_mongo_ip_correctly(self) -> None:
self.assertEqual(self.mongo_ip, self.test_store.mongo_ip)
def test_mongo_db_property_returns_mongo_db_correctly(self) -> None:
self.assertEqual(self.mongo_db, self.test_store.mongo_db)
def test_mongo_port_property_returns_mongo_port_correctly(self) -> None:
self.assertEqual(self.mongo_port, self.test_store.mongo_port)
def test_redis_property_returns_redis_correctly(self) -> None:
self.assertEqual(type(self.redis), type(self.test_store.redis))
def test_mongo_property_returns_none_when_mongo_not_init(self) -> None:
self.assertEqual(None, self.test_store.mongo)
def test_initialise_rabbitmq_initialises_everything_as_expected(
self) -> None:
# To make sure that the exchanges have not already been declared
self.rabbitmq.connect()
self.rabbitmq.exchange_delete(HEALTH_CHECK_EXCHANGE)
self.rabbitmq.exchange_delete(CONFIG_EXCHANGE)
self.rabbitmq.disconnect()
self.test_store._initialise_rabbitmq()
# Perform checks that the connection has been opened, marked as open
# and that the delivery confirmation variable is set.
self.assertTrue(self.test_store.rabbitmq.is_connected)
self.assertTrue(self.test_store.rabbitmq.connection.is_open)
self.assertTrue(
self.test_store.rabbitmq.channel._delivery_confirmation)
# Check whether the producing exchanges have been created by using
# passive=True. If this check fails an exception is raised
# automatically.
self.test_store.rabbitmq.exchange_declare(
CONFIG_EXCHANGE, passive=True)
self.test_store.rabbitmq.exchange_declare(HEALTH_CHECK_EXCHANGE,
passive=True)
# Check whether the queue has been created by sending messages with the
# same routing key. If this fails an exception is raised, hence the test
# fails.
self.test_store.rabbitmq.basic_publish_confirm(
exchange=CONFIG_EXCHANGE,
routing_key=CONFIGS_STORE_INPUT_ROUTING_KEY,
body=self.test_data_str, is_body_dict=False,
properties=pika.BasicProperties(delivery_mode=2), mandatory=False)
# Re-declare queue to get the number of messages
res = self.test_store.rabbitmq.queue_declare(
CONFIGS_STORE_INPUT_QUEUE_NAME, False, True, False, False)
self.assertEqual(1, res.method.message_count)
@parameterized.expand([
("self.nodes_config_1", "self.routing_key_1"),
("self.nodes_config_2", "self.routing_key_12",),
("self.alerts_config_1", "self.routing_key_2"),
("self.alerts_config_1", "self.routing_key_5"),
("self.alerts_config_1", "self.routing_key_13"),
("self.evm_nodes_config", "self.routing_key_16"),
("self.github_repos_config_1", "self.routing_key_3"),
("self.github_repos_config_2", "self.routing_key_4"),
("self.systems_config_1", "self.routing_key_6"),
("self.systems_config_2", "self.routing_key_15",),
("self.email_config_1", "self.routing_key_7"),
("self.pagerduty_config_1", "self.routing_key_8"),
("self.opsgenie_config_1", "self.routing_key_9"),
("self.telegram_config_1", "self.routing_key_10"),
("self.twilio_config_1", "self.routing_key_11"),
])
@mock.patch(
"src.data_store.stores.config.ConfigStore"
"._process_redis_store_chain_monitorables",
autospec=True)
@mock.patch("src.data_store.stores.store.RabbitMQApi.basic_ack",
autospec=True)
@mock.patch("src.data_store.stores.store.Store._send_heartbeat",
autospec=True)
def test_process_data_saves_in_redis(
self, mock_config_data, mock_routing_key, mock_send_hb, mock_ack,
mock_store_chain) -> None:
self.rabbitmq.connect()
mock_ack.return_value = None
data = eval(mock_config_data)
routing_key = eval(mock_routing_key)
self.test_store._initialise_rabbitmq()
blocking_channel = self.test_store.rabbitmq.channel
method_chains = pika.spec.Basic.Deliver(
routing_key=eval(mock_routing_key))
properties = pika.spec.BasicProperties()
self.test_store._process_data(blocking_channel, method_chains,
properties, json.dumps(data))
mock_ack.assert_called_once()
mock_send_hb.assert_called_once()
self.assertEqual(data, json.loads(
self.redis.get(Keys.get_config(routing_key)).decode("utf-8")))
mock_store_chain.assert_called_once()
@freeze_time("2012-01-01")
@mock.patch(
"src.data_store.stores.config.ConfigStore"
"._process_redis_store_chain_monitorables",
autospec=True)
@mock.patch("src.data_store.stores.store.RabbitMQApi.basic_ack",
autospec=True)
@mock.patch("src.data_store.stores.config.ConfigStore._process_redis_store",
autospec=True)
def test_process_data_sends_heartbeat_correctly(
self, mock_process_redis_store, mock_basic_ack, mock_store_chains
) -> None:
mock_basic_ack.return_value = None
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.gridlayout import GridLayout
from kivy.properties import ObjectProperty
from kivy.clock import Clock
from kivy.graphics.texture import Texture
import cv2
import numpy as np
import tensorflow.keras as keras
import tensorflow as tf
# Custom module for miscellaneous utility classes to support a GUI.
from utils.folder_functions import UserPath
from utils.write_hdf5 import StreamToHDF5
from utils.data_functions import DataUtils
from arduino.python_arduino import Arduino
# Are we using the RealSense API? If not, fall back to the OpenCV webcam API
try:
import pyrealsense2 as rs
except ImportError:
rs = None
# Servo Pin Numbers
STEERING_SERVO = 9
THROTTLE_SERVO = 10
# Using TensorRT?
"""
For now, this is decided without exposure to the user since the Jetson Nano is
virtually useless without TensorRT parsing. We may decide to incorporate this
into the UI.
"""
USE_TRT = True
# Layout files for GUI sub-panels
Builder.load_file('kvSubPanels/camctrls.kv')
Builder.load_file('kvSubPanels/vehiclestatus.kv')
Builder.load_file('kvSubPanels/pwmsettings.kv')
Builder.load_file('kvSubPanels/powerctrls.kv')
Builder.load_file('kvSubPanels/filediag.kv')
Builder.load_file('kvSubPanels/statusbar.kv')
class EngineApp(App):
def __init__(self):
"""
AI framework that defines the drive system.
Please study the 'drive_loop' method first to determine
how the primary systems are interconnected.
"""
App.__init__(self)
# Parameters
self.rc_mode = None
self.drive_loop_buffer_fps = None
self.inference_loop_buffer_fps = None
self.camera_buffer_fps = None
self.arduino_board = None
self.file_IO = None
self.stream_to_file = None
self.model = None
self.image_buffer = None
self.prediction = None
self.inference_method = None
self.get_frame = None
self.car_name = "miniAutonomous"
self.drive_mode = 'Manual'
self.data_utils = DataUtils()
self.camera_real_rate = 0
self.inference_real_rate = 0
self.recording_image_width = 0
self.recording_image_height = 0
self.nn_image_width = 0
self.nn_image_height = 0
self.sequence_length = 0
# Use webcam or realsense camera
self.use_webcam = False
# RealSense camera pipeline
if rs is not None:
self.rs_pipeline = rs.pipeline()
self.rs_config = rs.config()
else:
self.rs_pipeline = None
self.rs_config = None
self.rs_is_on = False
# Webcam option
self.webcam_feed = None
self.webcam_on = False
# Arduino connected?
self.board_available = False
# Are we recording?
self.record_on = False
# Net loaded?
self.net_loaded = False
# Log folder selected?
self.log_folder_selected = False
# Was the car previously recording data
self.previously_recording = False
# Is the car speaking to you?
# Just checking that you are reading the comments...
# Set a variety of default values
self._set_defaults()
def _set_defaults(self):
"""
Set default values for various numeric parameters.
"""
# Set the desired rate of the drive loop
self.drive_loop_rate = 30
# Number of channels of input image
self.color_depth = 3
# Length of buffer reel (i.e. how many values are used in moving avg)
self.moving_avg_length = 100
# NN input parameters
self.recording_image_width = 120
self.recording_image_height = 90
# For RNNs, define the sequence length
self.sequence_length = 5
# Creation of buffer arrays
"""
Please Note:
These two buffers help provide a moving average of the frame rate
at which the overall framework operates, (input image -> inference -> output command),
and that at which the camera is operating, (basic FPS of camera).
These are important to determine if the vehicles drive system is operating
at an optimal rate, which should be close to realtime, (~30 fps).
"""
self.drive_loop_buffer_fps = np.full(self.moving_avg_length,
1 * int(self.drive_loop_rate))
self.inference_loop_buffer_fps = np.full(self.moving_avg_length,
1 * int(self.drive_loop_rate))
self.camera_buffer_fps = np.full(self.moving_avg_length,
1 * int(self.drive_loop_rate))
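# A minimal sketch of the moving-average helper assumed above. The real
# implementation lives in utils.data_functions (DataUtils.moving_avg) and may
# differ; this only illustrates how drive_loop consumes it: roll the buffer,
# overwrite the oldest sample with the newest one, and return the buffer
# together with its mean.
def _moving_avg_sketch(buffer: np.ndarray, new_value: float):
    """Return (updated_buffer, average); hypothetical stand-in for DataUtils.moving_avg."""
    buffer = np.roll(buffer, -1)           # shift samples left by one slot
    buffer[-1] = new_value                 # newest sample replaces the oldest
    return buffer, float(np.mean(buffer))  # buffer plus its moving average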
def build(self):
"""
This is the first method that Kivy calls to build the GUI.
Please note:
(i) User input for loading a file is tracked via 'file_IO' to keep a history
of the prior use of the app and use those selections as the default
selections for the current instance
(ii) The main app class and the GUI class pass references of themselves to each
other to facilitate exchange of parameters.
"""
self.title = 'EngineAppGUI'
self.icon = 'img/logoTitleBarV2_32x32.png'
self.file_IO = UserPath('EngineApp.py')
self.ui = EngineAppGUI(self) # noqa
# Stream file object to record data
self.stream_to_file = StreamToHDF5(self.recording_image_width,
self.recording_image_height,
self.ui.steering_max,
self.ui.steering_min,
self.ui.throttle_neutral,
self.ui.throttle_max,
self.ui.throttle_min)
return self.ui
def drive_loop(self, dt: int):
"""
Main loop that drives the AI framework, from here forwards
referred to as drive system. (Because that's how we roll.)
This is the most critical method of the App class and should
be the first lines of code studied if you wish to get a firm
grip on the code base.
Parameters
----------
dt: (float) time step in seconds between calls; the effective loop rate is 1/dt
"""
self.drive_loop_buffer_fps, fp_avg =\
self.data_utils.moving_avg(self.drive_loop_buffer_fps, 1 / dt)
# Create a message stream to inform the user of current status/performance
self.root.vehStatus.loopFps.text = f'Primary Loop (FPS): {fp_avg:3.0f}'
# Run the camera
self.run_camera()
# Display camera fps
self.root.vehStatus.camFps.text = f'Camera Loop (FPS): {self.camera_real_rate:3.0f}'
"""
Now that the camera is running, the image it produces is available
to all methods via 'self.ui.primary_image'.
This means the image used for recording or inference is the same one
the user sees in the UI.
"""
# Check the desired mode
"""
We are using the five channel options (TQi4ch)
"""
mode_pwm = self.arduino_board.mode_in()
full_ai_pwm = self.arduino_board.full_ai_in()
# Set the vehicle to manual or autonomous
if mode_pwm < 1500:
self.drive_mode = 'Manual'
elif mode_pwm > 1500:
if full_ai_pwm < 1500:
# Steering is autonomous, but manual throttle
self.drive_mode = 'Steering Autonomous'
else:
# Both steering and throttle are autonomous
self.drive_mode = 'Full Autonomous'
# Display mode
ui_messages = f'Mode: {self.drive_mode}={mode_pwm:3.0f}'
ui_messages += f', Full AI PWM = {full_ai_pwm: 3.0f}'
# Are we recording?
"""
Please note:
Here we are using a five channel transmitter/receiver,
so the option to record from the camera has been separated from
the drive mode. You can therefore record to create training
data, (manual driving), or you can record to show the vehicle
driving itself from the perspective of the vehicle.
"""
record_pwm = self.arduino_board.rec_in()
if record_pwm < 1500:
self.record_on = False
else:
self.record_on = True
# Display record option
ui_messages += f', Record Mode: {self.record_on}={record_pwm:3.0f}'
# Drive the car
if self.drive_mode == 'Manual':
steering_output, throttle_output = self.drive_manual()
ui_messages += f', Steering: {steering_output}, Throttle: {throttle_output}'
# Or have the car drive itself
else:
# Check first if a network is loaded
if self.net_loaded:
steering_output, throttle_output = self.drive_autonomous()
ui_messages += f', Steering: {steering_output}, Throttle: {throttle_output}'
else:
ui_messages = f'You need to load a network before driving autonomously!'
steering_output, throttle_output = self.drive_manual()
# Record data
if self.record_on and self.log_folder_selected:
# Initiate a thread for writing to a data file
self.stream_to_file.initiate_stream()
# Resize the image to be saved for training
record_image = cv2.resize(self.ui.primary_image,
(self.recording_image_width, self.recording_image_height))
self.stream_to_file.log_queue.put((self.stream_to_file.frame_index,
fp_avg,
steering_output,
throttle_output,
record_image))
self.stream_to_file.frame_index += 1
# The vehicle is now recording
self.previously_recording = True
# Update the UI
self.root.powerCtrls.recording.bgnColor = [0, 1, 0, 1]
elif not self.record_on and self.previously_recording is True:
# Close a file stream if one was open and the user requested it be closed
self.stream_to_file.close_log_file()
self.previously_recording = False
# Reset the frame index to zero in case the user wants to restart recording
self.stream_to_file.frame_index = 0
# Update the UI
self.root.powerCtrls.recording.bgnColor = [0.7, 0.7, 0.7, 1]
# Send the message stream to the UI
self.root.statusBar.lblStatusBar.text = ui_messages
def drive_manual(self):
"""
Manual driving option.
Returns
-------
steering_output: (int) desired steering output
throttle_output: (int) desired throttle output
"""
# Steering
steering_output = self.arduino_board.steer_in()
# Clip to range if required
steering_output = self.data_utils.chop_value(steering_output,
self.ui.steering_min,
self.ui.steering_max)
self.arduino_board.Servos.write(STEERING_SERVO, steering_output)
# Throttle
throttle_output = self.arduino_board.throttle_in()
throttle_output = self.data_utils.chop_value(throttle_output,
self.ui.throttle_min,
self.ui.throttle_max)
self.arduino_board.Servos.write(THROTTLE_SERVO, throttle_output)
# Update UI
self.root.powerCtrls.manual.bgnColor = [0, 1, 0, 1]
self.root.powerCtrls.ai_steering.bgnColor = [0.7, 0.7, 0.7, 1]
self.root.powerCtrls.ai_full.bgnColor = [0.7, 0.7, 0.7, 1]
return steering_output, throttle_output
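# A minimal sketch of the clipping helper used above. DataUtils.chop_value is
# assumed here to clamp a PWM command into [min_value, max_value]; the actual
# implementation is in utils.data_functions and may differ.
def _chop_value_sketch(value, min_value, max_value):
    """Clamp a command into [min_value, max_value]; hypothetical stand-in for DataUtils.chop_value."""
    return max(min_value, min(max_value, value))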
def drive_autonomous(self):
"""
Drive the vehicle by doing things autonomously.
Returns
-------
steering_output: (int) inference-based steering output
throttle_output: (int) inference or driver-based throttle output
"""
# Resize the image to be compatible with neural network
new_image = cv2.resize(self.ui.primary_image, (self.nn_image_width, self.nn_image_height))
# Perform inference
drive_inference = self.inference_method(new_image)
# Get the inference rate
delta_inference_fps = self.data_utils.get_timer()
self.inference_loop_buffer_fps, fps_avg = self.data_utils.moving_avg(self.inference_loop_buffer_fps,
1 / delta_inference_fps)
self.inference_real_rate = round(fps_avg, 1)
# Post the timing to the UI
self.root.vehStatus.inferenceFps.text = f'Inference Loop (FPS): {self.inference_real_rate:3.0f}'
"""
Model produces inferences from -100 to 100 for steering and 0 to 100 for throttle,
so we need to rescale these to the current PWM ranges.
"""
rescaled_steering = self.data_utils.map_function(drive_inference[0],
[-100, 100,
self.ui.steering_min,
self.ui.steering_max])
self.arduino_board.Servos.write(STEERING_SERVO, rescaled_steering)
# Now determine the throttle
if self.drive_mode == 'Steering
<filename>apps/fund/migrations/0005_migrate_paymentlogs.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from bluebottle.utils.model_dispatcher import get_model_mapping
MODEL_MAP = get_model_mapping()
class Migration(DataMigration):
depends_on = (
('bluebottle.payments_logger', '0001_initial'),
('bluebottle.payments_docdata', '0002_auto__add_field_docdatapayment_customer_id__add_field_docdatapayment_e'),
)
def forwards(self, orm):
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
# Create a lookup for new docdata payments
ddps = orm['payments_docdata.DocdataPayment'].objects.all().values_list('payment_cluster_id', 'id')
dd_payments = {}
for ddp in ddps:
dd_payments[ddp[0]] = ddp[1]
count = 0
total = orm['cowry_docdata.DocDataPaymentLogEntry'].objects.count()
for i, log_entry_model in enumerate(orm['cowry_docdata.DocDataPaymentLogEntry'].objects.iterator()):
if not i % 50:
print "Processing DocdataPaymentLogEntry {0} of {1}".format(i, total)
# Fetch DocDataPaymentOrder
old_docdata_payment_order = log_entry_model.docdata_payment_order
# Fetch corresponding DocdataPayment
if old_docdata_payment_order.merchant_order_reference in dd_payments:
new_docdata_payment_id = dd_payments[old_docdata_payment_order.merchant_order_reference]
else:
count += 1
msg = "No new DocdataPayment object found for the old DocdataPaymentOrder object. DocdataPaymentOrder ID: {0} DocDataPaymentLogEntry ID: {1}".format(old_docdata_payment_order.id, log_entry_model.id)
print msg
continue
# Create new PaymentLogEntry using the old DocDataPaymentLogEntry data
payment_log_entry = orm['payments_logger.PaymentLogEntry'].objects.create(
message=log_entry_model.message,
level=log_entry_model.level,
timestamp=log_entry_model.timestamp,
payment_id=new_docdata_payment_id
)
payment_log_entry.save()
if not i % 50:
print "PaymentLogEntry {0} created".format(i)
print "PaymentLogEntries without DocdataPayment: {0}".format(count)
def backwards(self, orm):
orm['payments_logger.PaymentLogEntry'].objects.all().delete()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'bb_projects.projectphase': {
'Meta': {'ordering': "['sequence']", 'object_name': 'ProjectPhase'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'blank': 'True'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'owner_editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sequence': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'viewable': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bb_projects.projecttheme': {
'Meta': {'ordering': "['name']", 'object_name': 'ProjectTheme'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cowry.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3'}),
'fee': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'payments'", 'to': u"orm['fund.Order']"}),
'payment_method_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'payment_submethod_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '15', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapayment': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataPayment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'docdata_payment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'docdata_payments'", 'to': u"orm['cowry_docdata.DocDataPaymentOrder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '60', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry_docdata.docdatapayment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '30'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapaymentlogentry': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'DocDataPaymentLogEntry'},
'docdata_payment_order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log_entries'", 'to': u"orm['cowry_docdata.DocDataPaymentOrder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
u'cowry_docdata.docdatapaymentorder': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataPaymentOrder', '_ormbases': [u'cowry.Payment']},
'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'customer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '2'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'merchant_order_reference': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'payment_order_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
u'payment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cowry.Payment']", 'unique': 'True', 'primary_key': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20'})
},
u'cowry_docdata.docdatawebdirectdirectdebit': {
'Meta': {'ordering': "('-created', '-updated')", 'object_name': 'DocDataWebDirectDirectDebit', '_ormbases': [u'cowry_docdata.DocDataPayment']},
'account_city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'account_name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'bic': ('django_iban.fields.SWIFTBICField', [], {'max_length': '11'}),
u'docdatapayment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['cowry_docdata.DocDataPayment']", 'unique': 'True', 'primary_key': 'True'}),
'iban': ('django_iban.fields.IBANField', [], {'max_length': '34'})
},
u'fund.donation': {
'Meta': {'object_name': MODEL_MAP['donation']['class']},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'one_off'", 'max_length': '20', 'db_index': 'True'}),
'fundraiser': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_donations'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['fundraiser']['model'])}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'donations'", 'null': 'True', 'to': u"orm['fund.Order']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'old_donations'", 'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}),
'ready': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'null': 'True', 'blank': 'True'}),
'voucher': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vouchers.Voucher']", 'null': 'True', 'blank': 'True'})
},
u'fund.order': {
'Meta': {'ordering': "('-updated',)", 'object_name': MODEL_MAP['order']['class']},
'closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30', 'db_index': 'True'}),
'recurring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'current'", 'max_length': '20', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_orders'", 'null': 'True', 'to': "orm['{0}']".format(MODEL_MAP['user']['model'])})
},
u'fund.recurringdirectdebitpayment': {
'Meta': {'object_name': 'RecurringDirectDebitPayment'},
'account': ('apps.fund.fields.DutchBankAccountField', [], {'max_length': '10'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bic': ('django_iban.fields.SWIFTBICField', [], {'default': "''", 'max_length': '11', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': '3'}),
'iban': ('django_iban.fields.IBANField', [], {'default': "''", 'max_length': '34', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manually_process': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '35'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model']), 'unique': 'True'})
},
MODEL_MAP['fundraiser']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['fundraiser']['class']},
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'EUR'", 'max_length': "'10'"}),
'deadline': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['user']['model'])}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(MODEL_MAP['project']['model'])}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'geo.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'alpha2_code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'alpha3_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'oda_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.SubRegion']"})
},
u'geo.region': {
'Meta': {'ordering': "['name']", 'object_name': 'Region'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'geo.subregion': {
'Meta': {'ordering': "['name']", 'object_name': 'SubRegion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['geo.Region']"})
},
MODEL_MAP['user']['model_lower']: {
'Meta': {'object_name': MODEL_MAP['user']['class']},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'available_time': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'disable_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default':
<filename>dataset2.py
from typing import Optional, Union, Callable, Iterator, List
from collections import OrderedDict
import random
import tensorflow as tf
import numpy as np
import string
import addressnet.lookups as lookups
from addressnet.typo import generate_typo
# Schema used to decode data from the TFRecord file
_features = OrderedDict([
('Building_name', tf.FixedLenFeature([], tf.string)),
('Block', tf.FixedLenFeature([], tf.string)),
('Level', tf.FixedLenFeature([], tf.string)),
('Unit', tf.FixedLenFeature([], tf.string)),
('House_number', tf.FixedLenFeature([], tf.string)),
('Street_name', tf.FixedLenFeature([], tf.string)),
('Township', tf.FixedLenFeature([], tf.string)),
('Locality_name', tf.FixedLenFeature([], tf.string)),
('State', tf.FixedLenFeature([], tf.string)),
('Postcode', tf.FixedLenFeature([], tf.int64)),
])
# List of fields used as labels in the training data
labels_list = [
'Building_name', # 1
'Block', # 2
'Level', # 3
'Unit', # 4
'House_number', # 5
'Street_name', # 6
'Township', # 7
'Locality_name', # 8
'State', # 9
'Postcode', # 10
]
# Number of labels in total (+1 for the blank category)
n_labels = len(labels_list) + 1
# Allowable characters for the encoded representation
vocab = list(string.digits + string.ascii_lowercase + string.punctuation + string.whitespace)
def vocab_lookup(characters: str) -> (int, np.ndarray):
"""
Converts a string into a list of vocab indices
:param characters: the string to convert
:return: the string length and an array of vocab indices
"""
result = list()
for c in characters.lower():
try:
result.append(vocab.index(c) + 1)
except ValueError:
result.append(0)
return len(characters), np.array(result, dtype=np.int64)
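# Illustrative usage (values follow from the vocab defined above, where digits
# come first and lowercase letters second, both shifted by one so that 0 can
# mean "unknown character"):
# >>> vocab_lookup("a5")   # -> (2, array([11, 6]))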
def decode_data(record: List[Union[str, int, float]]) -> Iterator[Union[str, int, float]]:
"""
Decodes a record from the tfrecord file by converting all strings to UTF-8 encoding, and any numeric field with
a value of -1 to None.
:param record: the record to decode
:return: an iterator for yielding the decoded fields
"""
for item in record:
try:
# Attempt to treat the item in the record as a string
yield item.decode("UTF-8")
except AttributeError:
# Treat the item as a number and encode -1 as None (see generate_tf_records.py)
yield item if item != -1 else None
def labels(text: Union[str, int], field_name: Optional[str], mutate: bool = True) -> (str, np.ndarray):
"""
Generates a numpy matrix labelling each character by field type. Strings have artificial typos introduced if
mutate == True
:param text: the text to label
:param field_name: the name of the field to which the text belongs, or None if the label is blank
:param mutate: introduce artificial typos
:return: the original text and the numpy matrix of labels
"""
# Ensure the input is a string, encoding None to an empty to string
if text is None:
text = ''
else:
# Introduce artificial typos if mutate == True
text = generate_typo(str(text)) if mutate else str(text)
labels_matrix = np.zeros((len(text), n_labels), dtype=np.bool)
# If no field is supplied, then encode the label using the blank category
if field_name is None:
labels_matrix[:, 0] = True
else:
labels_matrix[:, labels_list.index(field_name) + 1] = True
return text, labels_matrix
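# Illustrative usage: labelling "42" as a House_number yields a (2, n_labels)
# boolean matrix with the House_number column set for both characters
# (column 0 is reserved for the blank/no-field class):
# >>> text, m = labels("42", "House_number", mutate=False)
# >>> text, m.shape   # -> ('42', (2, 11))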
def random_separator(min_length: int = 1, max_length: int = 3, possible_sep_chars: Optional[str] = r",./\ ") -> str:
"""
Generates a space-padded separator of random length using a random character from possible_sep_chars
:param min_length: minimum length of the separator
:param max_length: maximum length of the separator
:param possible_sep_chars: string of possible characters to use for the separator
:return: the separator string
"""
chars = [" "] * random.randint(min_length, max_length)
if len(chars) > 0 and possible_sep_chars:
sep_char = random.choice(possible_sep_chars)
chars[random.randrange(len(chars))] = sep_char
return ''.join(chars)
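# Illustrative usage: the separator is random, but its length is bounded and it
# contains at most one non-space character:
# >>> sep = random_separator(1, 3)
# >>> 1 <= len(sep) <= 3 and sum(c != ' ' for c in sep) <= 1   # -> True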
def join_labels(lbls: [np.ndarray], sep: Union[str, Callable[..., str]] = " ") -> np.ndarray:
"""
Concatenates a series of label matrices with a separator
:param lbls: a list of numpy matrices
:param sep: the separator string or function that returns the sep string
:return: the concatenated labels
"""
if len(lbls) < 2:
return lbls
joined_labels = None
sep_str = None
# if `sep` is not a function, set the separator (`sep_str`) to `sep`, otherwise leave as None
if not callable(sep):
sep_str = sep
for l in lbls:
if joined_labels is None:
joined_labels = l
else:
# If `sep` is a function, call it on each iteration
if callable(sep):
sep_str = sep()
# Skip zero-length labels
if l.shape[0] == 0:
continue
elif sep_str is not None and len(sep_str) > 0 and joined_labels.shape[0] > 0:
# Join using sep_str if it's present and non-zero in length
joined_labels = np.concatenate([joined_labels, labels(sep_str, None, mutate=False)[1], l], axis=0)
else:
# Otherwise, directly concatenate the labels
joined_labels = np.concatenate([joined_labels, l], axis=0)
assert joined_labels is not None, "No labels were joined!"
assert joined_labels.shape[1] == n_labels, "The number of labels generated was unexpected: got %i but wanted %i" % (
joined_labels.shape[1], n_labels)
return joined_labels
def join_str_and_labels(parts: [(str, np.ndarray)], sep: Union[str, Callable[..., str]] = " ") -> (str, np.ndarray):
"""
Joins the strings and labels using the given separator
:param parts: a list of string/label tuples
:param sep: a string or function that returns the string to be used as a separator
:return: the joined string and labels
"""
# Keep only the parts with strings of length > 0
parts = [p for p in parts if len(p[0]) > 0]
# If there are no parts at all, return an empty string an array of shape (0, n_labels)
if len(parts) == 0:
return '', np.zeros((0, n_labels))
# If there's only one part, just give it back as-is
elif len(parts) == 1:
return parts[0]
# Pre-generate the separators - this is important if `sep` is a function returning non-deterministic results
n_sep = len(parts) - 1
if callable(sep):
seps = [sep() for _ in range(n_sep)]
else:
seps = [sep] * n_sep
seps += ['']
# Join the strings using the list of separators
strings = ''.join(sum([(s[0][0], s[1]) for s in zip(parts, seps)], ()))
# Join the labels using an iterator function
sep_iter = iter(seps)
lbls = join_labels([s[1] for s in parts], sep=lambda: next(sep_iter))
assert len(strings) == lbls.shape[0], "string length %i (%s), label length %i using sep %s" % (
len(strings), strings, lbls.shape[0], seps)
return strings, lbls
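# Illustrative usage: joining two labelled fragments with a fixed separator
# keeps the character count and the number of label rows aligned:
# >>> s, m = join_str_and_labels([labels("12", "House_number", mutate=False),
# ...                             labels("main st", "Street_name", mutate=False)],
# ...                            sep=" ")
# >>> s, m.shape   # -> ('12 main st', (10, 11))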
def choose(option1: Callable = lambda: None, option2: Callable = lambda: None):
"""
Randomly run either option 1 or option 2
:param option1: a possible function to run
:param option2: another possible function to run
:return: the result of the function
"""
if random.getrandbits(1):
return option1()
else:
return option2()
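# Illustrative usage: randomly keep or drop an optional address component;
# option2 defaults to a no-op, so half of the time the list stays unchanged.
# >>> maybe_parts = []
# >>> choose(lambda: maybe_parts.append(labels("sydney", "Locality_name", mutate=False)))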
def synthesise_address(*record) -> (int, np.ndarray, np.ndarray):
"""
Uses the record information to construct a formatted address with labels. The addresses generated involve
semi-random permutations and corruptions to help avoid over-fitting.
:param record: the decoded item from the TFRecord file
:return: the address string length, encoded text and labels
"""
fields = dict(zip(_features.keys(), decode_data(record)))
# Generate the individual address components:
'''
if fields['level_type'] > 0:
level = generate_level_number(fields['level_type'], fields['level_number_prefix'], fields['level_number'],
fields['level_number_suffix'])
else:
level = ('', np.zeros((0, n_labels)))
if fields['flat_type'] > 0:
flat_number = generate_flat_number(
fields['flat_type'], fields['flat_number_prefix'], fields['flat_number'], fields['flat_number_suffix'])
else:
flat_number = ('', np.zeros((0, n_labels)))
street_number = generate_street_number(fields['number_first_prefix'], fields['number_first'],
fields['number_first_suffix'], fields['number_last_prefix'],
fields['number_last'], fields['number_last_suffix'])
street = generate_street_name(fields['street_name'], fields['street_suffix_code'], fields['street_type_code'])
suburb = labels(fields['locality_name'], 'locality_name')
state = generate_state(fields['state_abbreviation'])
postcode = labels(fields['postcode'], 'postcode')
building_name = labels(fields['building_name'], 'building_name')
# Begin composing the formatted address, building up the `parts` variable...
suburb_state_postcode = list()
# Keep the suburb?
choose(lambda: suburb_state_postcode.append(suburb))
# Keep state?
choose(lambda: suburb_state_postcode.append(state))
# Keep postcode?
choose(lambda: suburb_state_postcode.append(postcode))
random.shuffle(suburb_state_postcode)
parts = [[building_name], [level]]
# Keep the street number? (If street number is dropped, the flat number is also dropped)
def keep_street_number():
# force flat number to be next to street number only if the flat number is only digits (i.e. does not have a
# flat type)
if flat_number[0].isdigit():
parts.append([flat_number, street_number, street])
else:
parts.append([flat_number])
parts.append([street_number, street])
choose(keep_street_number, lambda: parts.append([street]))
'''
Building_name = labels(fields['Building_name'], 'Building_name')
Block = labels(fields['Block'], 'Block')
Level = labels(fields['Level'], 'Level')
Unit = labels(fields['Unit'], 'Unit')
House_number = labels(fields['House_number'], 'House_number')
Street_name = labels(fields['Street_name'], 'Street_name')
Township = labels(fields['Township'], 'Township')
Locality_name = labels(fields['Locality_name'], 'Locality_name')
State = labels(fields['State'], 'State')
Postcode = labels(fields['Postcode'], 'Postcode')
unit_address = join_str_and_labels([Block, Level, Unit], sep=lambda: random_separator(1, 2))
Locality_state_postcode = list()
Locality_state_postcode.append(Locality_name)
Locality_state_postcode.append(State)
Locality_state_postcode.append(Postcode)
# Wrap each component in a list so the flatten step below can concatenate them
parts = []
parts.append([Building_name])
parts.append([unit_address])
parts.append([House_number])
parts.append([Street_name])
parts.append([Township])
random.shuffle(parts)
# Suburb, state, postcode is always at the end of an address
parts.append(Locality_state_postcode)
# Flatten the address components into an unnested list of (string, label) tuples
parts = sum(parts, [])
# Join each address component/label with a random separator
address, address_lbl = join_str_and_labels(parts,